[SCSI] Merge branch 'linus'
author James Bottomley <James.Bottomley@HansenPartnership.com>
Fri, 12 Jun 2009 15:02:03 +0000 (10:02 -0500)
committer James Bottomley <James.Bottomley@HansenPartnership.com>
Fri, 12 Jun 2009 15:02:03 +0000 (10:02 -0500)
Conflicts:
drivers/message/fusion/mptsas.c

fixed up conflict between req->data_len accessors and mptsas driver updates.
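
For context, the colliding block-layer change converted direct reads of
req->data_len into accessor calls. Below is a minimal sketch of that style
of conversion (illustrative only, not the actual mptsas.c hunk; the helper
name is hypothetical, and it assumes the 2.6.31-era blk_rq_bytes()
accessor):

#include <linux/blkdev.h>

/*
 * Hypothetical helper: drivers used to read req->data_len directly;
 * after the accessor conversion they call blk_rq_bytes() instead.
 */
static unsigned int mptsas_transfer_len(struct request *req)
{
	return blk_rq_bytes(req);	/* total data length in bytes */
}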

Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
141 files changed:
drivers/infiniband/ulp/iser/iscsi_iser.c
drivers/message/fusion/mptbase.c
drivers/message/fusion/mptbase.h
drivers/message/fusion/mptctl.c
drivers/message/fusion/mptdebug.h
drivers/message/fusion/mptfc.c
drivers/message/fusion/mptsas.c
drivers/message/fusion/mptsas.h
drivers/message/fusion/mptscsih.c
drivers/message/fusion/mptscsih.h
drivers/message/fusion/mptspi.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/bnx2.c
drivers/net/bnx2.h
drivers/net/cnic.c [new file with mode: 0644]
drivers/net/cnic.h [new file with mode: 0644]
drivers/net/cnic_defs.h [new file with mode: 0644]
drivers/net/cnic_if.h [new file with mode: 0644]
drivers/s390/scsi/zfcp_ccw.c
drivers/s390/scsi/zfcp_dbf.c
drivers/s390/scsi/zfcp_def.h
drivers/s390/scsi/zfcp_erp.c
drivers/s390/scsi/zfcp_ext.h
drivers/s390/scsi/zfcp_fc.c
drivers/s390/scsi/zfcp_fsf.c
drivers/s390/scsi/zfcp_scsi.c
drivers/scsi/Kconfig
drivers/scsi/Makefile
drivers/scsi/NCR_D700.c
drivers/scsi/bnx2i/57xx_iscsi_constants.h [new file with mode: 0644]
drivers/scsi/bnx2i/57xx_iscsi_hsi.h [new file with mode: 0644]
drivers/scsi/bnx2i/Kconfig [new file with mode: 0644]
drivers/scsi/bnx2i/Makefile [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i.h [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_hwi.c [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_init.c [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_iscsi.c [new file with mode: 0644]
drivers/scsi/bnx2i/bnx2i_sysfs.c [new file with mode: 0644]
drivers/scsi/cxgb3i/cxgb3i.h
drivers/scsi/cxgb3i/cxgb3i_iscsi.c
drivers/scsi/cxgb3i/cxgb3i_offload.c
drivers/scsi/cxgb3i/cxgb3i_offload.h
drivers/scsi/device_handler/scsi_dh_rdac.c
drivers/scsi/fcoe/fcoe.c
drivers/scsi/fcoe/fcoe.h
drivers/scsi/fcoe/libfcoe.c
drivers/scsi/fnic/fnic_main.c
drivers/scsi/gdth_proc.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvfc.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvscsi.h
drivers/scsi/ibmvscsi/viosrp.h
drivers/scsi/ipr.c
drivers/scsi/libfc/fc_exch.c
drivers/scsi/libfc/fc_fcp.c
drivers/scsi/libfc/fc_rport.c
drivers/scsi/libiscsi.c
drivers/scsi/libiscsi_tcp.c
drivers/scsi/lpfc/lpfc.h
drivers/scsi/lpfc/lpfc_attr.c
drivers/scsi/lpfc/lpfc_crtn.h
drivers/scsi/lpfc/lpfc_ct.c
drivers/scsi/lpfc/lpfc_debugfs.c
drivers/scsi/lpfc/lpfc_disc.h
drivers/scsi/lpfc/lpfc_els.c
drivers/scsi/lpfc/lpfc_hbadisc.c
drivers/scsi/lpfc/lpfc_hw.h
drivers/scsi/lpfc/lpfc_hw4.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_init.c
drivers/scsi/lpfc/lpfc_logmsg.h
drivers/scsi/lpfc/lpfc_mbox.c
drivers/scsi/lpfc/lpfc_mem.c
drivers/scsi/lpfc/lpfc_nportdisc.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/lpfc/lpfc_scsi.h
drivers/scsi/lpfc/lpfc_sli.c
drivers/scsi/lpfc/lpfc_sli.h
drivers/scsi/lpfc/lpfc_sli4.h [new file with mode: 0644]
drivers/scsi/lpfc/lpfc_version.h
drivers/scsi/lpfc/lpfc_vport.c
drivers/scsi/mpt2sas/mpt2sas_base.h
drivers/scsi/mpt2sas/mpt2sas_ctl.c
drivers/scsi/mpt2sas/mpt2sas_scsih.c
drivers/scsi/mpt2sas/mpt2sas_transport.c
drivers/scsi/mvsas.c [deleted file]
drivers/scsi/mvsas/Kconfig [new file with mode: 0644]
drivers/scsi/mvsas/Makefile [new file with mode: 0644]
drivers/scsi/mvsas/mv_64xx.c [new file with mode: 0644]
drivers/scsi/mvsas/mv_64xx.h [new file with mode: 0644]
drivers/scsi/mvsas/mv_94xx.c [new file with mode: 0644]
drivers/scsi/mvsas/mv_94xx.h [new file with mode: 0644]
drivers/scsi/mvsas/mv_chips.h [new file with mode: 0644]
drivers/scsi/mvsas/mv_defs.h [new file with mode: 0644]
drivers/scsi/mvsas/mv_init.c [new file with mode: 0644]
drivers/scsi/mvsas/mv_sas.c [new file with mode: 0644]
drivers/scsi/mvsas/mv_sas.h [new file with mode: 0644]
drivers/scsi/osd/Kbuild
drivers/scsi/osd/Makefile [deleted file]
drivers/scsi/osd/osd_initiator.c
drivers/scsi/osd/osd_uld.c
drivers/scsi/qla1280.c
drivers/scsi/qla1280.h
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_dbg.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_fw.h
drivers/scsi/qla2xxx/qla_gbl.h
drivers/scsi/qla2xxx/qla_gs.c
drivers/scsi/qla2xxx/qla_init.c
drivers/scsi/qla2xxx/qla_iocb.c
drivers/scsi/qla2xxx/qla_isr.c
drivers/scsi/qla2xxx/qla_mbx.c
drivers/scsi/qla2xxx/qla_mid.c
drivers/scsi/qla2xxx/qla_os.c
drivers/scsi/qla2xxx/qla_sup.c
drivers/scsi/qla2xxx/qla_version.h
drivers/scsi/scsi.c
drivers/scsi/scsi_debug.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_transport_iscsi.c
drivers/scsi/sd.c
drivers/scsi/st.c
drivers/scsi/sym53c8xx_2/sym_glue.c
drivers/scsi/sym53c8xx_2/sym_hipd.c
drivers/scsi/sym53c8xx_2/sym_hipd.h
fs/exofs/common.h
fs/exofs/inode.c
fs/exofs/osd.c
include/linux/if_ether.h
include/scsi/fc/fc_fip.h
include/scsi/iscsi_if.h
include/scsi/libfc.h
include/scsi/libiscsi.h
include/scsi/osd_attributes.h
include/scsi/osd_initiator.h
include/scsi/osd_protocol.h
include/scsi/scsi_transport_iscsi.h

diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 75223f50de586fa9dc08c77f4f0ccec0a73a6020..0ba6ec87629616e946a64aedad68d86a3b6d4010 100644
@@ -257,11 +257,8 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
 {
        struct iscsi_iser_task *iser_task = task->dd_data;
 
-       /*
-        * mgmt tasks do not need special cleanup and we do not
-        * allocate anything in the init task callout
-        */
-       if (!task->sc || task->state == ISCSI_TASK_PENDING)
+       /* mgmt tasks do not need special cleanup */
+       if (!task->sc)
                return;
 
        if (iser_task->status == ISER_TASK_STATUS_STARTED) {
@@ -517,7 +514,8 @@ iscsi_iser_conn_get_stats(struct iscsi_cls_conn *cls_conn, struct iscsi_stats *s
 }
 
 static struct iscsi_endpoint *
-iscsi_iser_ep_connect(struct sockaddr *dst_addr, int non_blocking)
+iscsi_iser_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
+                     int non_blocking)
 {
        int err;
        struct iser_conn *ib_conn;
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d496a99e034c9498c583957f7a6b028370e8c3c..44b9315044579639e9e1e10463887f866c38c9ef 100644
@@ -146,7 +146,6 @@ static MPT_EVHANDLER                 MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static MPT_RESETHANDLER                 MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 static struct mpt_pci_driver   *MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
 
-static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
 
 /*
  *  Driver Callback Index's
@@ -159,7 +158,8 @@ static u8 last_drv_idx;
  *  Forward protos...
  */
 static irqreturn_t mpt_interrupt(int irq, void *bus_id);
-static int     mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+static int     mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+               MPT_FRAME_HDR *reply);
 static int     mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
                        u32 *req, int replyBytes, u16 *u16reply, int maxwait,
                        int sleepFlag);
@@ -190,9 +190,9 @@ static int  mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
 static int     mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
 static void    mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
 static void    mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
-static void    mpt_timer_expired(unsigned long data);
 static void    mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc);
-static int     SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
+static int     SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch,
+       int sleepFlag);
 static int     SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
 static int     mpt_host_page_access_control(MPT_ADAPTER *ioc, u8 access_control_value, int sleepFlag);
 static int     mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init);
@@ -207,8 +207,8 @@ static int  procmpt_iocinfo_read(char *buf, char **start, off_t offset,
 #endif
 static void    mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
 
-//int          mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
-static int     ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers);
+static int     ProcessEventNotification(MPT_ADAPTER *ioc,
+               EventNotificationReply_t *evReply, int *evHandlers);
 static void    mpt_iocstatus_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
 static void    mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
 static void    mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info);
@@ -276,6 +276,56 @@ mpt_get_cb_idx(MPT_DRIVER_CLASS dclass)
        return 0;
 }
 
+/**
+ * mpt_is_discovery_complete - determine if discovery has completed
+ * @ioc: per adapter instance
+ *
+ * Returns 1 when discovery has completed, else zero.
+ */
+static int
+mpt_is_discovery_complete(MPT_ADAPTER *ioc)
+{
+       ConfigExtendedPageHeader_t hdr;
+       CONFIGPARMS cfg;
+       SasIOUnitPage0_t *buffer;
+       dma_addr_t dma_handle;
+       int rc = 0;
+
+       memset(&hdr, 0, sizeof(ConfigExtendedPageHeader_t));
+       memset(&cfg, 0, sizeof(CONFIGPARMS));
+       hdr.PageVersion = MPI_SASIOUNITPAGE0_PAGEVERSION;
+       hdr.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
+       hdr.ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
+       cfg.cfghdr.ehdr = &hdr;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+       if ((mpt_config(ioc, &cfg)))
+               goto out;
+       if (!hdr.ExtPageLength)
+               goto out;
+
+       buffer = pci_alloc_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+           &dma_handle);
+       if (!buffer)
+               goto out;
+
+       cfg.physAddr = dma_handle;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+       if ((mpt_config(ioc, &cfg)))
+               goto out_free_consistent;
+
+       if (!(buffer->PhyData[0].PortFlags &
+           MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS))
+               rc = 1;
+
+ out_free_consistent:
+       pci_free_consistent(ioc->pcidev, hdr.ExtPageLength * 4,
+           buffer, dma_handle);
+ out:
+       return rc;
+}
+
 /**
  *     mpt_fault_reset_work - work performed on workq after ioc fault
  *     @work: input argument, used to derive ioc
@@ -290,7 +340,7 @@ mpt_fault_reset_work(struct work_struct *work)
        int              rc;
        unsigned long    flags;
 
-       if (ioc->diagPending || !ioc->active)
+       if (ioc->ioc_reset_in_progress || !ioc->active)
                goto out;
 
        ioc_raw_state = mpt_GetIocState(ioc, 0);
@@ -307,6 +357,12 @@ mpt_fault_reset_work(struct work_struct *work)
                        printk(MYIOC_s_WARN_FMT "IOC is in FAULT state after "
                            "reset (%04xh)\n", ioc->name, ioc_raw_state &
                            MPI_DOORBELL_DATA_MASK);
+       } else if (ioc->bus_type == SAS && ioc->sas_discovery_quiesce_io) {
+               if ((mpt_is_discovery_complete(ioc))) {
+                       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "clearing "
+                           "discovery_quiesce_io flag\n", ioc->name));
+                       ioc->sas_discovery_quiesce_io = 0;
+               }
        }
 
  out:
@@ -317,11 +373,11 @@ mpt_fault_reset_work(struct work_struct *work)
                ioc = ioc->alt_ioc;
 
        /* rearm the timer */
-       spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
        if (ioc->reset_work_q)
                queue_delayed_work(ioc->reset_work_q, &ioc->fault_reset_work,
                        msecs_to_jiffies(MPT_POLLING_INTERVAL));
-       spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 }
 
 
@@ -501,9 +557,9 @@ mpt_interrupt(int irq, void *bus_id)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_base_reply - MPT base driver's callback routine
+ *     mptbase_reply - MPT base driver's callback routine
  *     @ioc: Pointer to MPT_ADAPTER structure
- *     @mf: Pointer to original MPT request frame
+ *     @req: Pointer to original MPT request frame
  *     @reply: Pointer to MPT reply frame (NULL if TurboReply)
  *
  *     MPT base driver's callback routine; all base driver
@@ -514,122 +570,49 @@ mpt_interrupt(int irq, void *bus_id)
  *     should be freed, or 0 if it shouldn't.
  */
 static int
-mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+mptbase_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
 {
+       EventNotificationReply_t *pEventReply;
+       u8 event;
+       int evHandlers;
        int freereq = 1;
-       u8 func;
-
-       dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply() called\n", ioc->name));
-#ifdef CONFIG_FUSION_LOGGING
-       if ((ioc->debug_level & MPT_DEBUG_MSG_FRAME) &&
-                       !(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
-               dmfprintk(ioc, printk(MYIOC_s_INFO_FMT ": Original request frame (@%p) header\n",
-                   ioc->name, mf));
-               DBG_DUMP_REQUEST_FRAME_HDR(ioc, (u32 *)mf);
-       }
-#endif
-
-       func = reply->u.hdr.Function;
-       dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, Function=%02Xh\n",
-                       ioc->name, func));
-
-       if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
-               EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
-               int evHandlers = 0;
-               int results;
-
-               results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
-               if (results != evHandlers) {
-                       /* CHECKME! Any special handling needed here? */
-                       devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
-                                       ioc->name, evHandlers, results));
-               }
 
-               /*
-                *      Hmmm...  It seems that EventNotificationReply is an exception
-                *      to the rule of one reply per request.
-                */
-               if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY) {
+       switch (reply->u.hdr.Function) {
+       case MPI_FUNCTION_EVENT_NOTIFICATION:
+               pEventReply = (EventNotificationReply_t *)reply;
+               evHandlers = 0;
+               ProcessEventNotification(ioc, pEventReply, &evHandlers);
+               event = le32_to_cpu(pEventReply->Event) & 0xFF;
+               if (pEventReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
                        freereq = 0;
-               } else {
-                       devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "EVENT_NOTIFICATION reply %p returns Request frame\n",
-                               ioc->name, pEvReply));
-               }
-
-#ifdef CONFIG_PROC_FS
-//             LogEvent(ioc, pEvReply);
-#endif
-
-       } else if (func == MPI_FUNCTION_EVENT_ACK) {
-               dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_base_reply, EventAck reply received\n",
-                               ioc->name));
-       } else if (func == MPI_FUNCTION_CONFIG) {
-               CONFIGPARMS *pCfg;
-               unsigned long flags;
-
-               dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "config_complete (mf=%p,mr=%p)\n",
-                               ioc->name, mf, reply));
-
-               pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
-
-               if (pCfg) {
-                       /* disable timer and remove from linked list */
-                       del_timer(&pCfg->timer);
-
-                       spin_lock_irqsave(&ioc->FreeQlock, flags);
-                       list_del(&pCfg->linkage);
-                       spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-                       /*
-                        *      If IOC Status is SUCCESS, save the header
-                        *      and set the status code to GOOD.
-                        */
-                       pCfg->status = MPT_CONFIG_ERROR;
-                       if (reply) {
-                               ConfigReply_t   *pReply = (ConfigReply_t *)reply;
-                               u16              status;
-
-                               status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-                               dcprintk(ioc, printk(MYIOC_s_NOTE_FMT "  IOCStatus=%04xh, IOCLogInfo=%08xh\n",
-                                    ioc->name, status, le32_to_cpu(pReply->IOCLogInfo)));
-
-                               pCfg->status = status;
-                               if (status == MPI_IOCSTATUS_SUCCESS) {
-                                       if ((pReply->Header.PageType &
-                                           MPI_CONFIG_PAGETYPE_MASK) ==
-                                           MPI_CONFIG_PAGETYPE_EXTENDED) {
-                                               pCfg->cfghdr.ehdr->ExtPageLength =
-                                                   le16_to_cpu(pReply->ExtPageLength);
-                                               pCfg->cfghdr.ehdr->ExtPageType =
-                                                   pReply->ExtPageType;
-                                       }
-                                       pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
-
-                                       /* If this is a regular header, save PageLength. */
-                                       /* LMP Do this better so not using a reserved field! */
-                                       pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
-                                       pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
-                                       pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
-                               }
-                       }
-
-                       /*
-                        *      Wake up the original calling thread
-                        */
-                       pCfg->wait_done = 1;
-                       wake_up(&mpt_waitq);
+               if (event != MPI_EVENT_EVENT_CHANGE)
+                       break;
+       case MPI_FUNCTION_CONFIG:
+       case MPI_FUNCTION_SAS_IO_UNIT_CONTROL:
+               ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+               if (reply) {
+                       ioc->mptbase_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+                       memcpy(ioc->mptbase_cmds.reply, reply,
+                           min(MPT_DEFAULT_FRAME_SIZE,
+                               4 * reply->u.reply.MsgLength));
                }
-       } else if (func == MPI_FUNCTION_SAS_IO_UNIT_CONTROL) {
-               /* we should be always getting a reply frame */
-               memcpy(ioc->persist_reply_frame, reply,
-                   min(MPT_DEFAULT_FRAME_SIZE,
-                   4*reply->u.reply.MsgLength));
-               del_timer(&ioc->persist_timer);
-               ioc->persist_wait_done = 1;
-               wake_up(&mpt_waitq);
-       } else {
-               printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
-                               ioc->name, func);
+               if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->mptbase_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+                       complete(&ioc->mptbase_cmds.done);
+               } else
+                       freereq = 0;
+               if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_FREE_MF)
+                       freereq = 1;
+               break;
+       case MPI_FUNCTION_EVENT_ACK:
+               devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "EventAck reply received\n", ioc->name));
+               break;
+       default:
+               printk(MYIOC_s_ERR_FMT
+                   "Unexpected msg function (=%02Xh) reply received!\n",
+                   ioc->name, reply->u.hdr.Function);
+               break;
        }
 
        /*
@@ -988,17 +971,21 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 
        /*  Put Request back on FreeQ!  */
        spin_lock_irqsave(&ioc->FreeQlock, flags);
-       mf->u.frame.linkage.arg1 = 0xdeadbeaf; /* signature to know if this mf is freed */
+       if (cpu_to_le32(mf->u.frame.linkage.arg1) == 0xdeadbeaf)
+               goto out;
+       /* signature to know if this mf is freed */
+       mf->u.frame.linkage.arg1 = cpu_to_le32(0xdeadbeaf);
        list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
 #ifdef MFCNT
        ioc->mfcnt--;
 #endif
+ out:
        spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mpt_add_sge - Place a simple SGE at address pAddr.
+ *     mpt_add_sge - Place a simple 32 bit SGE at address pAddr.
  *     @pAddr: virtual address for SGE
  *     @flagslength: SGE flags and data transfer length
  *     @dma_addr: Physical address
@@ -1006,23 +993,117 @@ mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
  *     This routine places a MPT request frame back on the MPT adapter's
  *     FreeQ.
  */
-void
-mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
+static void
+mpt_add_sge(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
 {
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
-               SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+       SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
+       pSge->FlagsLength = cpu_to_le32(flagslength);
+       pSge->Address = cpu_to_le32(dma_addr);
+}
+
+/**
+ *     mpt_add_sge_64bit - Place a simple 64 bit SGE at address pAddr.
+ *     @pAddr: virtual address for SGE
+ *     @flagslength: SGE flags and data transfer length
+ *     @dma_addr: Physical address
+ *
+ *     This routine places a simple 64 bit SGE (scatter gather
+ *     element) at the address passed in @pAddr.
+ **/
+static void
+mpt_add_sge_64bit(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+       SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+       pSge->Address.Low = cpu_to_le32
+                       (lower_32_bits((unsigned long)(dma_addr)));
+       pSge->Address.High = cpu_to_le32
+                       (upper_32_bits((unsigned long)dma_addr));
+       pSge->FlagsLength = cpu_to_le32
+                       ((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/**
+ *     mpt_add_sge_64bit_1078 - Place a simple 64 bit SGE at address pAddr
+ *     (1078 workaround).
+ *     @pAddr: virtual address for SGE
+ *     @flagslength: SGE flags and data transfer length
+ *     @dma_addr: Physical address
+ *
+ *     This routine places a simple 64 bit SGE at @pAddr, applying
+ *     the 1078 36GB-limit errata workaround when required.
+ **/
+static void
+mpt_add_sge_64bit_1078(void *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+       SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+       u32 tmp;
+
+       pSge->Address.Low = cpu_to_le32
+                       (lower_32_bits((unsigned long)(dma_addr)));
+       tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+
+       /*
+        * 1078 errata workaround for the 36GB limitation
+        */
+       if ((((u64)dma_addr + MPI_SGE_LENGTH(flagslength)) >> 32)  == 9) {
+               flagslength |=
+                   MPI_SGE_SET_FLAGS(MPI_SGE_FLAGS_LOCAL_ADDRESS);
+               tmp |= (1<<31);
+               if (mpt_debug_level & MPT_DEBUG_36GB_MEM)
+                       printk(KERN_DEBUG "1078 P0M2 addressing for "
+                           "addr = 0x%llx len = %d\n",
+                           (unsigned long long)dma_addr,
+                           MPI_SGE_LENGTH(flagslength));
+       }
+
+       pSge->Address.High = cpu_to_le32(tmp);
+       pSge->FlagsLength = cpu_to_le32(
+               (flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *     mpt_add_chain - Place a 32 bit chain SGE at address pAddr.
+ *     @pAddr: virtual address for SGE
+ *     @next: nextChainOffset value (u32's)
+ *     @length: length of next SGL segment
+ *     @dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+               SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+               pChain->Length = cpu_to_le16(length);
+               pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT;
+               pChain->NextChainOffset = next;
+               pChain->Address = cpu_to_le32(dma_addr);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *     mpt_add_chain_64bit - Place a 64 bit chain SGE at address pAddr.
+ *     @pAddr: virtual address for SGE
+ *     @next: nextChainOffset value (u32's)
+ *     @length: length of next SGL segment
+ *     @dma_addr: Physical address
+ *
+ */
+static void
+mpt_add_chain_64bit(void *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+               SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
                u32 tmp = dma_addr & 0xFFFFFFFF;
 
-               pSge->FlagsLength = cpu_to_le32(flagslength);
-               pSge->Address.Low = cpu_to_le32(tmp);
-               tmp = (u32) ((u64)dma_addr >> 32);
-               pSge->Address.High = cpu_to_le32(tmp);
+               pChain->Length = cpu_to_le16(length);
+               pChain->Flags = (MPI_SGE_FLAGS_CHAIN_ELEMENT |
+                                MPI_SGE_FLAGS_64_BIT_ADDRESSING);
 
-       } else {
-               SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
-               pSge->FlagsLength = cpu_to_le32(flagslength);
-               pSge->Address = cpu_to_le32(dma_addr);
-       }
+               pChain->NextChainOffset = next;
+
+               pChain->Address.Low = cpu_to_le32(tmp);
+               tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+               pChain->Address.High = cpu_to_le32(tmp);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1225,7 +1306,7 @@ mpt_host_page_alloc(MPT_ADAPTER *ioc, pIOCInit_t ioc_init)
        }
        flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
        flags_length |= ioc->HostPageBuffer_sz;
-       mpt_add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
+       ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
        ioc->facts.HostPageBufferSGE = ioc_init->HostPageBufferSGE;
 
 return 0;
@@ -1534,21 +1615,42 @@ mpt_mapresources(MPT_ADAPTER *ioc)
 
        pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
 
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
-           && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
-                   ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
-                   ioc->name));
-       } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
-           && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
-               dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
-                   ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
-                   ioc->name));
+       if (sizeof(dma_addr_t) > 4) {
+               const uint64_t required_mask = dma_get_required_mask
+                   (&pdev->dev);
+               if (required_mask > DMA_BIT_MASK(32)
+                       && !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
+                       && !pci_set_consistent_dma_mask(pdev,
+                                                DMA_BIT_MASK(64))) {
+                       ioc->dma_mask = DMA_BIT_MASK(64);
+                       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+                               ": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+                               ioc->name));
+               } else if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+                       && !pci_set_consistent_dma_mask(pdev,
+                                               DMA_BIT_MASK(32))) {
+                       ioc->dma_mask = DMA_BIT_MASK(32);
+                       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+                               ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+                               ioc->name));
+               } else {
+                       printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+                           ioc->name, pci_name(pdev));
+                       return r;
+               }
        } else {
-               printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
-                   ioc->name, pci_name(pdev));
-               pci_release_selected_regions(pdev, ioc->bars);
-               return r;
+               if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
+                       && !pci_set_consistent_dma_mask(pdev,
+                                               DMA_BIT_MASK(32))) {
+                       ioc->dma_mask = DMA_BIT_MASK(32);
+                       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+                               ": 32 BIT PCI BUS DMA ADDRESSING SUPPORTED\n",
+                               ioc->name));
+               } else {
+                       printk(MYIOC_s_WARN_FMT "no suitable DMA mask for %s\n",
+                           ioc->name, pci_name(pdev));
+                       return r;
+               }
        }
 
        mem_phys = msize = 0;
@@ -1632,6 +1734,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ioc->id = mpt_ids++;
        sprintf(ioc->name, "ioc%d", ioc->id);
+       dinitprintk(ioc, printk(KERN_WARNING MYNAM ": mpt_adapter_install\n"));
 
        /*
         * set initial debug level
@@ -1650,14 +1753,36 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
                return r;
        }
 
+       /*
+        * Setting up proper handlers for scatter gather handling
+        */
+       if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+               if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+                       ioc->add_sge = &mpt_add_sge_64bit_1078;
+               else
+                       ioc->add_sge = &mpt_add_sge_64bit;
+               ioc->add_chain = &mpt_add_chain_64bit;
+               ioc->sg_addr_size = 8;
+       } else {
+               ioc->add_sge = &mpt_add_sge;
+               ioc->add_chain = &mpt_add_chain;
+               ioc->sg_addr_size = 4;
+       }
+       ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
        ioc->alloc_total = sizeof(MPT_ADAPTER);
        ioc->req_sz = MPT_DEFAULT_FRAME_SIZE;           /* avoid div by zero! */
        ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
 
        ioc->pcidev = pdev;
-       ioc->diagPending = 0;
-       spin_lock_init(&ioc->diagLock);
-       spin_lock_init(&ioc->initializing_hba_lock);
+
+       spin_lock_init(&ioc->taskmgmt_lock);
+       mutex_init(&ioc->internal_cmds.mutex);
+       init_completion(&ioc->internal_cmds.done);
+       mutex_init(&ioc->mptbase_cmds.mutex);
+       init_completion(&ioc->mptbase_cmds.done);
+       mutex_init(&ioc->taskmgmt_cmds.mutex);
+       init_completion(&ioc->taskmgmt_cmds.done);
 
        /* Initialize the event logging.
         */
@@ -1670,16 +1795,13 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        ioc->mfcnt = 0;
 #endif
 
+       ioc->sh = NULL;
        ioc->cached_fw = NULL;
 
        /* Initilize SCSI Config Data structure
         */
        memset(&ioc->spi_data, 0, sizeof(SpiCfgData));
 
-       /* Initialize the running configQ head.
-        */
-       INIT_LIST_HEAD(&ioc->configQ);
-
        /* Initialize the fc rport list head.
         */
        INIT_LIST_HEAD(&ioc->fc_rports);
@@ -1690,9 +1812,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
 
        /* Initialize workqueue */
        INIT_DELAYED_WORK(&ioc->fault_reset_work, mpt_fault_reset_work);
-       spin_lock_init(&ioc->fault_reset_work_lock);
 
-       snprintf(ioc->reset_work_q_name, sizeof(ioc->reset_work_q_name),
+       snprintf(ioc->reset_work_q_name, MPT_KOBJ_NAME_LEN,
                 "mpt_poll_%d", ioc->id);
        ioc->reset_work_q =
                create_singlethread_workqueue(ioc->reset_work_q_name);
@@ -1767,11 +1888,14 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
        case MPI_MANUFACTPAGE_DEVID_SAS1064:
        case MPI_MANUFACTPAGE_DEVID_SAS1068:
                ioc->errata_flag_1064 = 1;
+               ioc->bus_type = SAS;
+               break;
 
        case MPI_MANUFACTPAGE_DEVID_SAS1064E:
        case MPI_MANUFACTPAGE_DEVID_SAS1068E:
        case MPI_MANUFACTPAGE_DEVID_SAS1078:
                ioc->bus_type = SAS;
+               break;
        }
 
 
@@ -1813,6 +1937,11 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
         */
        mpt_detect_bound_ports(ioc, pdev);
 
+       INIT_LIST_HEAD(&ioc->fw_event_list);
+       spin_lock_init(&ioc->fw_event_lock);
+       snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
+       ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
+
        if ((r = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_BRINGUP,
            CAN_SLEEP)) != 0){
                printk(MYIOC_s_ERR_FMT "didn't initialize properly! (%d)\n",
@@ -1885,13 +2014,18 @@ mpt_detach(struct pci_dev *pdev)
        /*
         * Stop polling ioc for fault condition
         */
-       spin_lock_irqsave(&ioc->fault_reset_work_lock, flags);
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
        wq = ioc->reset_work_q;
        ioc->reset_work_q = NULL;
-       spin_unlock_irqrestore(&ioc->fault_reset_work_lock, flags);
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
        cancel_delayed_work(&ioc->fault_reset_work);
        destroy_workqueue(wq);
 
+       spin_lock_irqsave(&ioc->fw_event_lock, flags);
+       wq = ioc->fw_event_q;
+       ioc->fw_event_q = NULL;
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+       destroy_workqueue(wq);
 
        sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
        remove_proc_entry(pname, NULL);
@@ -1994,6 +2128,21 @@ mpt_resume(struct pci_dev *pdev)
        if (err)
                return err;
 
+       if (ioc->dma_mask == DMA_BIT_MASK(64)) {
+               if (pdev->device == MPI_MANUFACTPAGE_DEVID_SAS1078)
+                       ioc->add_sge = &mpt_add_sge_64bit_1078;
+               else
+                       ioc->add_sge = &mpt_add_sge_64bit;
+               ioc->add_chain = &mpt_add_chain_64bit;
+               ioc->sg_addr_size = 8;
+       } else {
+
+               ioc->add_sge = &mpt_add_sge;
+               ioc->add_chain = &mpt_add_chain;
+               ioc->sg_addr_size = 4;
+       }
+       ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;
+
        printk(MYIOC_s_INFO_FMT "pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
            ioc->name, (mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
            CHIPREG_READ32(&ioc->chip->Doorbell));
@@ -2091,12 +2240,16 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
        ioc->active = 0;
 
        if (ioc->alt_ioc) {
-               if (ioc->alt_ioc->active)
+               if (ioc->alt_ioc->active ||
+                   reason == MPT_HOSTEVENT_IOC_RECOVER) {
                        reset_alt_ioc_active = 1;
-
-               /* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */
-               CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF);
-               ioc->alt_ioc->active = 0;
+                       /* Disable alt-IOC's reply interrupts
+                        *  (and FreeQ) for a bit
+                        **/
+                       CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+                               0xFFFFFFFF);
+                       ioc->alt_ioc->active = 0;
+               }
        }
 
        hard = 1;
@@ -2117,9 +2270,11 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                        }
 
                } else {
-                       printk(MYIOC_s_WARN_FMT "NOT READY!\n", ioc->name);
+                       printk(MYIOC_s_WARN_FMT
+                           "NOT READY WARNING!\n", ioc->name);
                }
-               return -1;
+               ret = -1;
+               goto out;
        }
 
        /* hard_reset_done = 0 if a soft reset was performed
@@ -2129,7 +2284,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
                        alt_ioc_ready = 1;
                else
-                       printk(MYIOC_s_WARN_FMT "alt_ioc not ready!\n", ioc->alt_ioc->name);
+                       printk(MYIOC_s_WARN_FMT
+                           ": alt-ioc Not ready WARNING!\n",
+                           ioc->alt_ioc->name);
        }
 
        for (ii=0; ii<5; ii++) {
@@ -2150,7 +2307,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
        if (alt_ioc_ready) {
                if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
                        dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-                           "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
+                           "Initial Alt IocFacts failed rc=%x\n",
+                           ioc->name, rc));
                        /* Retry - alt IOC was initialized once
                         */
                        rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
@@ -2194,16 +2352,20 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                            IRQF_SHARED, ioc->name, ioc);
                        if (rc < 0) {
                                printk(MYIOC_s_ERR_FMT "Unable to allocate "
-                                   "interrupt %d!\n", ioc->name, ioc->pcidev->irq);
+                                   "interrupt %d!\n",
+                                   ioc->name, ioc->pcidev->irq);
                                if (ioc->msi_enable)
                                        pci_disable_msi(ioc->pcidev);
-                               return -EBUSY;
+                               ret = -EBUSY;
+                               goto out;
                        }
                        irq_allocated = 1;
                        ioc->pci_irq = ioc->pcidev->irq;
                        pci_set_master(ioc->pcidev);            /* ?? */
-                       dprintk(ioc, printk(MYIOC_s_INFO_FMT "installed at interrupt "
-                           "%d\n", ioc->name, ioc->pcidev->irq));
+                       pci_set_drvdata(ioc->pcidev, ioc);
+                       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+                           "installed at interrupt %d\n", ioc->name,
+                           ioc->pcidev->irq));
                }
        }
 
@@ -2212,17 +2374,22 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
         * init as upper addresses are needed for init.
         * If fails, continue with alt-ioc processing
         */
+       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "PrimeIocFifos\n",
+           ioc->name));
        if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
                ret = -3;
 
        /* May need to check/upload firmware & data here!
         * If fails, continue with alt-ioc processing
         */
+       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "SendIocInit\n",
+           ioc->name));
        if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
                ret = -4;
 // NEW!
        if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
-               printk(MYIOC_s_WARN_FMT ": alt_ioc (%d) FIFO mgmt alloc!\n",
+               printk(MYIOC_s_WARN_FMT
+                   ": alt-ioc (%d) FIFO mgmt alloc WARNING!\n",
                    ioc->alt_ioc->name, rc);
                alt_ioc_ready = 0;
                reset_alt_ioc_active = 0;
@@ -2232,8 +2399,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
                        alt_ioc_ready = 0;
                        reset_alt_ioc_active = 0;
-                       printk(MYIOC_s_WARN_FMT "alt_ioc (%d) init failure!\n",
-                           ioc->alt_ioc->name, rc);
+                       printk(MYIOC_s_WARN_FMT
+                               ": alt-ioc: (%d) init failure WARNING!\n",
+                                       ioc->alt_ioc->name, rc);
                }
        }
 
@@ -2269,28 +2437,36 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                }
        }
 
+       /*  Enable MPT base driver management of EventNotification
+        *  and EventAck handling.
+        */
+       if ((ret == 0) && (!ioc->facts.EventState)) {
+               dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+                       "SendEventNotification\n",
+                   ioc->name));
+               ret = SendEventNotification(ioc, 1, sleepFlag); /* 1=Enable */
+       }
+
+       if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
+               rc = SendEventNotification(ioc->alt_ioc, 1, sleepFlag);
+
        if (ret == 0) {
                /* Enable! (reply interrupt) */
                CHIPREG_WRITE32(&ioc->chip->IntMask, MPI_HIM_DIM);
                ioc->active = 1;
        }
-
-       if (reset_alt_ioc_active && ioc->alt_ioc) {
-               /* (re)Enable alt-IOC! (reply interrupt) */
-               dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "alt_ioc reply irq re-enabled\n",
-                   ioc->alt_ioc->name));
-               CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, MPI_HIM_DIM);
-               ioc->alt_ioc->active = 1;
+       if (rc == 0) {  /* alt ioc */
+               if (reset_alt_ioc_active && ioc->alt_ioc) {
+                       /* (re)Enable alt-IOC! (reply interrupt) */
+                       dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "alt-ioc "
+                               "reply irq re-enabled\n",
+                               ioc->alt_ioc->name));
+                       CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask,
+                               MPI_HIM_DIM);
+                       ioc->alt_ioc->active = 1;
+               }
        }
 
-       /*  Enable MPT base driver management of EventNotification
-        *  and EventAck handling.
-        */
-       if ((ret == 0) && (!ioc->facts.EventState))
-               (void) SendEventNotification(ioc, 1);   /* 1=Enable EventNotification */
-
-       if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
-               (void) SendEventNotification(ioc->alt_ioc, 1);  /* 1=Enable EventNotification */
 
        /*      Add additional "reason" check before call to GetLanConfigPages
         *      (combined with GetIoUnitPage2 call).  This prevents a somewhat
@@ -2306,8 +2482,9 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                mutex_init(&ioc->raid_data.inactive_list_mutex);
                INIT_LIST_HEAD(&ioc->raid_data.inactive_list);
 
-               if (ioc->bus_type == SAS) {
+               switch (ioc->bus_type) {
 
+               case SAS:
                        /* clear persistency table */
                        if(ioc->facts.IOCExceptions &
                            MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL) {
@@ -2321,8 +2498,15 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                         */
                        mpt_findImVolumes(ioc);
 
-               } else if (ioc->bus_type == FC) {
-                       if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) &&
+                       /* Check, and possibly reset, the coalescing value
+                        */
+                       mpt_read_ioc_pg_1(ioc);
+
+                       break;
+
+               case FC:
+                       if ((ioc->pfacts[0].ProtocolFlags &
+                               MPI_PORTFACTS_PROTOCOL_LAN) &&
                            (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
                                /*
                                 *  Pre-fetch the ports LAN MAC address!
@@ -2331,11 +2515,14 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                                (void) GetLanConfigPages(ioc);
                                a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
                                dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-                                   "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
-                                   ioc->name, a[5], a[4], a[3], a[2], a[1], a[0]));
-
+                                       "LanAddr = %02X:%02X:%02X"
+                                       ":%02X:%02X:%02X\n",
+                                       ioc->name, a[5], a[4],
+                                       a[3], a[2], a[1], a[0]));
                        }
-               } else {
+                       break;
+
+               case SPI:
                        /* Get NVRAM and adapter maximums from SPP 0 and 2
                         */
                        mpt_GetScsiPortSettings(ioc, 0);
@@ -2354,6 +2541,8 @@ mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
                        mpt_read_ioc_pg_1(ioc);
 
                        mpt_read_ioc_pg_4(ioc);
+
+                       break;
                }
 
                GetIoUnitPage2(ioc);
@@ -2435,16 +2624,20 @@ mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
                if (_pcidev == peer) {
                        /* Paranoia checks */
                        if (ioc->alt_ioc != NULL) {
-                               printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n",
-                                       ioc->name, ioc->alt_ioc->name);
+                               printk(MYIOC_s_WARN_FMT
+                                   "Oops, already bound (%s <==> %s)!\n",
+                                   ioc->name, ioc->name, ioc->alt_ioc->name);
                                break;
                        } else if (ioc_srch->alt_ioc != NULL) {
-                               printk(MYIOC_s_WARN_FMT "Oops, already bound to %s!\n",
-                                       ioc_srch->name, ioc_srch->alt_ioc->name);
+                               printk(MYIOC_s_WARN_FMT
+                                   "Oops, already bound (%s <==> %s)!\n",
+                                   ioc_srch->name, ioc_srch->name,
+                                   ioc_srch->alt_ioc->name);
                                break;
                        }
-                       dprintk(ioc, printk(MYIOC_s_INFO_FMT "FOUND! binding to %s\n",
-                               ioc->name, ioc_srch->name));
+                       dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                               "FOUND! binding %s <==> %s\n",
+                               ioc->name, ioc->name, ioc_srch->name));
                        ioc_srch->alt_ioc = ioc;
                        ioc->alt_ioc = ioc_srch;
                }
@@ -2464,8 +2657,8 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
        int ret;
 
        if (ioc->cached_fw != NULL) {
-               ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: Pushing FW onto "
-                   "adapter\n", __func__, ioc->name));
+               ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                       "%s: Pushing FW onto adapter\n", __func__, ioc->name));
                if ((ret = mpt_downloadboot(ioc, (MpiFwHeader_t *)
                    ioc->cached_fw, CAN_SLEEP)) < 0) {
                        printk(MYIOC_s_WARN_FMT
@@ -2474,11 +2667,30 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                }
        }
 
+       /*
+        * Put the controller into ready state (if its not already)
+        */
+       if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY) {
+               if (!SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET,
+                   CAN_SLEEP)) {
+                       if (mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_READY)
+                               printk(MYIOC_s_ERR_FMT "%s:  IOC msg unit "
+                                   "reset failed to put ioc in ready state!\n",
+                                   ioc->name, __func__);
+               } else
+                       printk(MYIOC_s_ERR_FMT "%s:  IOC msg unit reset "
+                           "failed!\n", ioc->name, __func__);
+       }
+
+
        /* Disable adapter interrupts! */
+       synchronize_irq(ioc->pcidev->irq);
        CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
        ioc->active = 0;
+
        /* Clear any lingering interrupt */
        CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+       CHIPREG_READ32(&ioc->chip->IntStatus);
 
        if (ioc->alloc != NULL) {
                sz = ioc->alloc_sz;
@@ -2538,19 +2750,22 @@ mpt_adapter_disable(MPT_ADAPTER *ioc)
                if((ret = mpt_host_page_access_control(ioc,
                    MPI_DB_HPBAC_FREE_BUFFER, NO_SLEEP)) != 0) {
                        printk(MYIOC_s_ERR_FMT
-                          "host page buffers free failed (%d)!\n",
-                           ioc->name, ret);
+                          ": %s: host page buffers free failed (%d)!\n",
+                           ioc->name, __func__, ret);
                }
-               dexitprintk(ioc, printk(MYIOC_s_INFO_FMT "HostPageBuffer free  @ %p, sz=%d bytes\n",
-                       ioc->name, ioc->HostPageBuffer, ioc->HostPageBuffer_sz));
+               dexitprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                       "HostPageBuffer free  @ %p, sz=%d bytes\n",
+                       ioc->name, ioc->HostPageBuffer,
+                       ioc->HostPageBuffer_sz));
                pci_free_consistent(ioc->pcidev, ioc->HostPageBuffer_sz,
                    ioc->HostPageBuffer, ioc->HostPageBuffer_dma);
                ioc->HostPageBuffer = NULL;
                ioc->HostPageBuffer_sz = 0;
                ioc->alloc_total -= ioc->HostPageBuffer_sz;
        }
-}
 
+       pci_set_drvdata(ioc->pcidev, NULL);
+}
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     mpt_adapter_dispose - Free all resources associated with an MPT adapter
@@ -2690,8 +2905,12 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
        }
 
        /* Is it already READY? */
-       if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)
+       if (!statefault &&
+           ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY)) {
+               dinitprintk(ioc, printk(MYIOC_s_INFO_FMT
+                   "IOC is in READY state\n", ioc->name));
                return 0;
+       }
 
        /*
         *      Check to see if IOC is in FAULT state.
@@ -2764,8 +2983,9 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
 
                ii++; cntdn--;
                if (!cntdn) {
-                       printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
-                                       ioc->name, (int)((ii+5)/HZ));
+                       printk(MYIOC_s_ERR_FMT
+                               "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+                               ioc->name, ioc_state, (int)((ii+5)/HZ));
                        return -ETIME;
                }
 
@@ -2778,9 +2998,8 @@ MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
        }
 
        if (statefault < 3) {
-               printk(MYIOC_s_INFO_FMT "Recovered from %s\n",
-                               ioc->name,
-                               statefault==1 ? "stuck handshake" : "IOC FAULT");
+               printk(MYIOC_s_INFO_FMT "Recovered from %s\n", ioc->name,
+                       statefault == 1 ? "stuck handshake" : "IOC FAULT");
        }
 
        return hard_reset_done;
@@ -2833,8 +3052,9 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
 
        /* IOC *must* NOT be in RESET state! */
        if (ioc->last_state == MPI_IOC_STATE_RESET) {
-               printk(MYIOC_s_ERR_FMT "Can't get IOCFacts NOT READY! (%08x)\n",
-                   ioc->name, ioc->last_state );
+               printk(KERN_ERR MYNAM
+                   ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
+                   ioc->name, ioc->last_state);
                return -44;
        }
 
@@ -2896,7 +3116,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
                 *      Old: u16{Major(4),Minor(4),SubMinor(8)}
                 *      New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
                 */
-               if (facts->MsgVersion < 0x0102) {
+               if (facts->MsgVersion < MPI_VERSION_01_02) {
                        /*
                         *      Handle old FC f/w style, convert to new...
                         */
@@ -2908,9 +3128,11 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
                        facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
 
                facts->ProductID = le16_to_cpu(facts->ProductID);
+
                if ((ioc->facts.ProductID & MPI_FW_HEADER_PID_PROD_MASK)
                    > MPI_FW_HEADER_PID_PROD_TARGET_SCSI)
                        ioc->ir_firmware = 1;
+
                facts->CurrentHostMfaHighAddr =
                                le32_to_cpu(facts->CurrentHostMfaHighAddr);
                facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
@@ -2926,7 +3148,7 @@ GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
                 * to 14 in MPI-1.01.0x.
                 */
                if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
-                   facts->MsgVersion > 0x0100) {
+                   facts->MsgVersion > MPI_VERSION_01_00) {
                        facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
                }
 
@@ -3108,6 +3330,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
 
        ioc_init.MaxDevices = (U8)ioc->devices_per_bus;
        ioc_init.MaxBuses = (U8)ioc->number_of_buses;
+
        dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "facts.MsgVersion=%x\n",
                   ioc->name, ioc->facts.MsgVersion));
        if (ioc->facts.MsgVersion >= MPI_VERSION_01_05) {
@@ -3122,7 +3345,7 @@ SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
        }
        ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz);   /* in BYTES */
 
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
+       if (ioc->sg_addr_size == sizeof(u64)) {
                /* Save the upper 32-bits of the request
                 * (reply) and sense buffers.
                 */
@@ -3325,11 +3548,10 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
        FWUpload_t              *prequest;
        FWUploadReply_t         *preply;
        FWUploadTCSGE_t         *ptcsge;
-       int                      sgeoffset;
        u32                      flagsLength;
        int                      ii, sz, reply_sz;
        int                      cmdStatus;
-
+       int                     request_size;
        /* If the image size is 0, we are done.
         */
        if ((sz = ioc->facts.FWImageSize) == 0)
@@ -3364,42 +3586,41 @@ mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
        ptcsge->ImageSize = cpu_to_le32(sz);
        ptcsge++;
 
-       sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
-
        flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
-       mpt_add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
-
-       sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
-       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": Sending FW Upload (req @ %p) sgeoffset=%d \n",
-           ioc->name, prequest, sgeoffset));
+       ioc->add_sge((char *)ptcsge, flagsLength, ioc->cached_fw_dma);
+       request_size = offsetof(FWUpload_t, SGL) + sizeof(FWUploadTCSGE_t) +
+           ioc->SGE_size;
+       dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending FW Upload "
+           " (req @ %p) fw_size=%d mf_request_size=%d\n", ioc->name, prequest,
+           ioc->facts.FWImageSize, request_size));
        DBG_DUMP_FW_REQUEST_FRAME(ioc, (u32 *)prequest);
 
-       ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
-                               reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
+       ii = mpt_handshake_req_reply_wait(ioc, request_size, (u32 *)prequest,
+           reply_sz, (u16 *)preply, 65 /*seconds*/, sleepFlag);
 
-       dinitprintk(ioc, printk(MYIOC_s_INFO_FMT ": FW Upload completed rc=%x \n", ioc->name, ii));
+       dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "FW Upload completed "
+           "rc=%x\n", ioc->name, ii));
 
        cmdStatus = -EFAULT;
        if (ii == 0) {
                /* Handshake transfer was complete and successful.
                 * Check the Reply Frame.
                 */
-               int status, transfer_sz;
-               status = le16_to_cpu(preply->IOCStatus);
-               if (status == MPI_IOCSTATUS_SUCCESS) {
-                       transfer_sz = le32_to_cpu(preply->ActualImageSize);
-                       if (transfer_sz == sz)
+               int status;
+               status = le16_to_cpu(preply->IOCStatus) &
+                               MPI_IOCSTATUS_MASK;
+               if (status == MPI_IOCSTATUS_SUCCESS &&
+                   ioc->facts.FWImageSize ==
+                   le32_to_cpu(preply->ActualImageSize))
                                cmdStatus = 0;
-               }
        }
        dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": do_upload cmdStatus=%d \n",
                        ioc->name, cmdStatus));
 
 
        if (cmdStatus) {
-
-               ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": fw upload failed, freeing image \n",
-                       ioc->name));
+               ddlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "fw upload failed, "
+                   "freeing image\n", ioc->name));
                mpt_free_fw_memory(ioc);
        }
        kfree(prequest);
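
A note on the sizing above: a simple SGE is a 32-bit FlagsLength word followed by a 32- or 64-bit bus address, so its size tracks the addressing the IOC was initialized with, not the host's sizeof(dma_addr_t). A condensed sketch of the arithmetic the patch moves to (all identifiers taken from the diff above):

    /* one simple SGE = FlagsLength word + bus address:
     *   32-bit addressing: 4 + 4 =  8 bytes
     *   64-bit addressing: 4 + 8 = 12 bytes */
    ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;

    /* FW upload handshake frame: header up to the SGL, one
     * transaction-context SGE, then exactly one simple SGE */
    request_size = offsetof(FWUpload_t, SGL) +
        sizeof(FWUploadTCSGE_t) + ioc->SGE_size;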
@@ -3723,6 +3944,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
        CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
 
        if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078) {
+
+               if (!ignore)
+                       return 0;
+
                drsprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: Doorbell=%p; 1078 reset "
                        "address=%p\n",  ioc->name, __func__,
                        &ioc->chip->Doorbell, &ioc->chip->Reset_1078));
@@ -3740,6 +3965,7 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
                                "looking for READY STATE: doorbell=%x"
                                " count=%d\n",
                                ioc->name, doorbell, count));
+
                        if (doorbell == MPI_IOC_STATE_READY) {
                                return 1;
                        }
@@ -3890,6 +4116,10 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
                                doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
                                doorbell &= MPI_IOC_STATE_MASK;
 
+                               drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                                   "looking for READY STATE: doorbell=%x"
+                                   " count=%d\n", ioc->name, doorbell, count));
+
                                if (doorbell == MPI_IOC_STATE_READY) {
                                        break;
                                }
@@ -3901,6 +4131,11 @@ mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
                                        mdelay (1000);
                                }
                        }
+
+                       if (doorbell != MPI_IOC_STATE_READY)
+                               printk(MYIOC_s_ERR_FMT "Failed to come READY "
+                                   "after reset! IocState=%x\n", ioc->name,
+                                   doorbell);
                }
        }
 
@@ -4019,8 +4254,9 @@ SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
                        if (sleepFlag != CAN_SLEEP)
                                count *= 10;
 
-                       printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
-                           ioc->name, (int)((count+5)/HZ));
+                       printk(MYIOC_s_ERR_FMT
+                           "Wait IOC_READY state (0x%x) timeout(%d)!\n",
+                           ioc->name, state, (int)((count+5)/HZ));
                        return -ETIME;
                }
 
@@ -4090,24 +4326,29 @@ initChainBuffers(MPT_ADAPTER *ioc)
         * num_sge = num sge in request frame + last chain buffer
         * scale = num sge per chain buffer if no chain element
         */
-       scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-       if (sizeof(dma_addr_t) == sizeof(u64))
-               num_sge =  scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
+       scale = ioc->req_sz / ioc->SGE_size;
+       if (ioc->sg_addr_size == sizeof(u64))
+               num_sge =  scale + (ioc->req_sz - 60) / ioc->SGE_size;
        else
-               num_sge =  1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
+               num_sge =  1 + scale + (ioc->req_sz - 64) / ioc->SGE_size;
 
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
+       if (ioc->sg_addr_size == sizeof(u64)) {
                numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
-                       (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
+                       (ioc->req_sz - 60) / ioc->SGE_size;
        } else {
-               numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
-                       (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
+               numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) +
+                   scale + (ioc->req_sz - 64) / ioc->SGE_size;
        }
        dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "num_sge=%d numSGE=%d\n",
                ioc->name, num_sge, numSGE));
 
-       if ( numSGE > MPT_SCSI_SG_DEPTH )
-               numSGE = MPT_SCSI_SG_DEPTH;
+       if (ioc->bus_type == FC) {
+               if (numSGE > MPT_SCSI_FC_SG_DEPTH)
+                       numSGE = MPT_SCSI_FC_SG_DEPTH;
+       } else {
+               if (numSGE > MPT_SCSI_SG_DEPTH)
+                       numSGE = MPT_SCSI_SG_DEPTH;
+       }
 
        num_chain = 1;
        while (numSGE - num_sge > 0) {
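
Worked through with hypothetical numbers (req_sz = 128 bytes, 64-bit SGEs so SGE_size = 12, MaxChainDepth = 8; illustrative values, not taken from this patch), the sizing above gives:

    scale   = 128 / 12;                   /* 10 SGEs per chain buffer       */
    num_sge = 10 + (128 - 60) / 12;       /* 15: request frame + last chain */
    numSGE  = (10 - 1) * (8 - 1) + 10 + (128 - 60) / 12;    /* 78 */
    /* numSGE is then clamped to MPT_SCSI_FC_SG_DEPTH or
     * MPT_SCSI_SG_DEPTH per ioc->bus_type, and num_chain grows
     * until the clamped depth is covered. */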
@@ -4161,12 +4402,42 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
        dma_addr_t alloc_dma;
        u8 *mem;
        int i, reply_sz, sz, total_size, num_chain;
+       u64     dma_mask;
+
+       dma_mask = 0;
 
        /*  Prime reply FIFO...  */
 
        if (ioc->reply_frames == NULL) {
                if ( (num_chain = initChainBuffers(ioc)) < 0)
                        return -1;
+               /*
+                * 1078 errata workaround for the 36GB limitation
+                */
+               if (ioc->pcidev->device == MPI_MANUFACTPAGE_DEVID_SAS1078 &&
+                   ioc->dma_mask > DMA_35BIT_MASK) {
+                       if (!pci_set_dma_mask(ioc->pcidev, DMA_BIT_MASK(32))
+                           && !pci_set_consistent_dma_mask(ioc->pcidev,
+                           DMA_BIT_MASK(32))) {
+                               dma_mask = DMA_35BIT_MASK;
+                               d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                                   "setting 35 bit addressing for "
+                                   "Request/Reply/Chain and Sense Buffers\n",
+                                   ioc->name));
+                       } else {
+                               /* Resetting DMA mask to 64 bit */
+                               pci_set_dma_mask(ioc->pcidev,
+                                       DMA_BIT_MASK(64));
+                               pci_set_consistent_dma_mask(ioc->pcidev,
+                                       DMA_BIT_MASK(64));
+
+                               printk(MYIOC_s_ERR_FMT
+                                   "failed setting 35 bit addressing for "
+                                   "Request/Reply/Chain and Sense Buffers\n",
+                                   ioc->name);
+                               return -1;
+                       }
+               }
 
                total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
                dinitprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
@@ -4305,9 +4576,16 @@ PrimeIocFifos(MPT_ADAPTER *ioc)
                alloc_dma += ioc->reply_sz;
        }
 
+       if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
+           ioc->dma_mask) && !pci_set_consistent_dma_mask(ioc->pcidev,
+           ioc->dma_mask))
+               d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "restoring 64 bit addressing\n", ioc->name));
+
        return 0;
 
 out_fail:
+
        if (ioc->alloc != NULL) {
                sz = ioc->alloc_sz;
                pci_free_consistent(ioc->pcidev,
@@ -4324,6 +4602,13 @@ out_fail:
                                ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
                ioc->sense_buf_pool = NULL;
        }
+
+       if (dma_mask == DMA_35BIT_MASK && !pci_set_dma_mask(ioc->pcidev,
+           DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(ioc->pcidev,
+           DMA_BIT_MASK(64)))
+               d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "restoring 64 bit addressing\n", ioc->name));
+
        return -1;
 }
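
The errata path above only narrows the mask around the coherent allocations: with a 32-bit mask installed, pci_alloc_consistent() hands back memory the 1078 can address, and the wide mask is restored before normal I/O resumes. The narrow-allocate-restore shape, as a minimal sketch (placeholder function name and parameters, error handling trimmed):

    /* Sketch: allocate one coherent pool below 4 GB, then restore
     * 64-bit addressing for regular I/O mappings. */
    static void *alloc_low_coherent(struct pci_dev *pdev, size_t sz,
        dma_addr_t *dma)
    {
        void *pool = NULL;

        if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) &&
            !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
            pool = pci_alloc_consistent(pdev, sz, dma);

        pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
        pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
        return pool;
    }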
 
@@ -4759,7 +5044,14 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
        SasIoUnitControlReply_t         *sasIoUnitCntrReply;
        MPT_FRAME_HDR                   *mf = NULL;
        MPIHeader_t                     *mpi_hdr;
+       int                             ret = 0;
+       unsigned long                   timeleft;
+
+       mutex_lock(&ioc->mptbase_cmds.mutex);
 
+       /* init the internal cmd struct */
+       memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
+       INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
 
        /* ensure garbage is not sent to fw */
        switch(persist_opcode) {
@@ -4769,17 +5061,19 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
                break;
 
        default:
-               return -1;
-               break;
+               ret = -1;
+               goto out;
        }
 
-       printk("%s: persist_opcode=%x\n",__func__, persist_opcode);
+       printk(KERN_DEBUG  "%s: persist_opcode=%x\n",
+               __func__, persist_opcode);
 
        /* Get a MF for this command.
         */
        if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
-               printk("%s: no msg frames!\n",__func__);
-               return -1;
+               printk(KERN_DEBUG "%s: no msg frames!\n", __func__);
+               ret = -1;
+               goto out;
         }
 
        mpi_hdr = (MPIHeader_t *) mf;
@@ -4789,27 +5083,42 @@ mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode)
        sasIoUnitCntrReq->MsgContext = mpi_hdr->MsgContext;
        sasIoUnitCntrReq->Operation = persist_opcode;
 
-       init_timer(&ioc->persist_timer);
-       ioc->persist_timer.data = (unsigned long) ioc;
-       ioc->persist_timer.function = mpt_timer_expired;
-       ioc->persist_timer.expires = jiffies + HZ*10 /* 10 sec */;
-       ioc->persist_wait_done=0;
-       add_timer(&ioc->persist_timer);
        mpt_put_msg_frame(mpt_base_index, ioc, mf);
-       wait_event(mpt_waitq, ioc->persist_wait_done);
+       timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done, 10*HZ);
+       if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               ret = -ETIME;
+               printk(KERN_DEBUG "%s: failed\n", __func__);
+               if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+                       goto out;
+               if (!timeleft) {
+                       printk(KERN_DEBUG "%s: Issuing Reset from %s!!\n",
+                           ioc->name, __func__);
+                       mpt_HardResetHandler(ioc, CAN_SLEEP);
+                       mpt_free_msg_frame(ioc, mf);
+               }
+               goto out;
+       }
+
+       if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+               ret = -1;
+               goto out;
+       }
 
        sasIoUnitCntrReply =
-           (SasIoUnitControlReply_t *)ioc->persist_reply_frame;
+           (SasIoUnitControlReply_t *)ioc->mptbase_cmds.reply;
        if (le16_to_cpu(sasIoUnitCntrReply->IOCStatus) != MPI_IOCSTATUS_SUCCESS) {
-               printk("%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
-                   __func__,
-                   sasIoUnitCntrReply->IOCStatus,
+               printk(KERN_DEBUG "%s: IOCStatus=0x%X IOCLogInfo=0x%X\n",
+                   __func__, sasIoUnitCntrReply->IOCStatus,
                    sasIoUnitCntrReply->IOCLogInfo);
-               return -1;
-       }
+               printk(KERN_DEBUG "%s: failed\n", __func__);
+               ret = -1;
+       } else
+               printk(KERN_DEBUG "%s: success\n", __func__);
+ out:
 
-       printk("%s: success\n",__func__);
-       return 0;
+       CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
+       mutex_unlock(&ioc->mptbase_cmds.mutex);
+       return ret;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
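
The conversion above is the pattern this patch applies throughout mptbase: the old timer plus wait_event() plumbing becomes a mutex-serialized internal command that waits on a completion, with the reply handler (or the IOC-reset callback further down) setting status bits and calling complete(). The caller half, condensed into one hedged sketch (a composite of the code above, not a function the patch adds):

    static int mpt_internal_cmd_wait(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
        unsigned long timeout)
    {
        unsigned long timeleft;
        int ret = 0;

        mutex_lock(&ioc->mptbase_cmds.mutex);
        memset(ioc->mptbase_cmds.reply, 0, MPT_DEFAULT_FRAME_SIZE);
        INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)    /* -> PENDING */

        mpt_put_msg_frame(mpt_base_index, ioc, mf);
        timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
            timeout);
        if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
            ret = -ETIME;
            /* DID_IOCRESET means a reset already cleaned up behind us;
             * a genuine timeout means the fw is wedged: hard reset. */
            if (!timeleft && !(ioc->mptbase_cmds.status &
                MPT_MGMT_STATUS_DID_IOCRESET)) {
                mpt_HardResetHandler(ioc, CAN_SLEEP);
                mpt_free_msg_frame(ioc, mf);
            }
        }
        CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
        mutex_unlock(&ioc->mptbase_cmds.mutex);
        return ret;
    }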
@@ -5394,17 +5703,20 @@ mpt_inactive_raid_volumes(MPT_ADAPTER *ioc, u8 channel, u8 id)
  *     -ENOMEM if pci_alloc failed
  **/
 int
-mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk)
+mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num,
+                       RaidPhysDiskPage0_t *phys_disk)
 {
-       CONFIGPARMS                     cfg;
-       ConfigPageHeader_t              hdr;
+       CONFIGPARMS                     cfg;
+       ConfigPageHeader_t              hdr;
        dma_addr_t                      dma_handle;
        pRaidPhysDiskPage0_t            buffer = NULL;
        int                             rc;
 
        memset(&cfg, 0 , sizeof(CONFIGPARMS));
        memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+       memset(phys_disk, 0, sizeof(RaidPhysDiskPage0_t));
 
+       hdr.PageVersion = MPI_RAIDPHYSDISKPAGE0_PAGEVERSION;
        hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
        cfg.cfghdr.hdr = &hdr;
        cfg.physAddr = -1;
@@ -5450,6 +5762,161 @@ mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t
        return rc;
 }
 
+/**
+ *     mpt_raid_phys_disk_get_num_paths - returns the number of paths associated with this phys_disk_num
+ *     @ioc: Pointer to an Adapter Structure
+ *     @phys_disk_num: io unit unique phys disk num generated by the ioc
+ *
+ *     Return:
+ *     the number of paths, or 0 on failure
+ **/
+int
+mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc, u8 phys_disk_num)
+{
+       CONFIGPARMS                     cfg;
+       ConfigPageHeader_t              hdr;
+       dma_addr_t                      dma_handle;
+       pRaidPhysDiskPage1_t            buffer = NULL;
+       int                             rc;
+
+       memset(&cfg, 0 , sizeof(CONFIGPARMS));
+       memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+
+       hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
+       hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+       hdr.PageNumber = 1;
+       cfg.cfghdr.hdr = &hdr;
+       cfg.physAddr = -1;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+       if (mpt_config(ioc, &cfg) != 0) {
+               rc = 0;
+               goto out;
+       }
+
+       if (!hdr.PageLength) {
+               rc = 0;
+               goto out;
+       }
+
+       buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+           &dma_handle);
+
+       if (!buffer) {
+               rc = 0;
+               goto out;
+       }
+
+       cfg.physAddr = dma_handle;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+       cfg.pageAddr = phys_disk_num;
+
+       if (mpt_config(ioc, &cfg) != 0) {
+               rc = 0;
+               goto out;
+       }
+
+       rc = buffer->NumPhysDiskPaths;
+ out:
+
+       if (buffer)
+               pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+                   dma_handle);
+
+       return rc;
+}
+EXPORT_SYMBOL(mpt_raid_phys_disk_get_num_paths);
+
+/**
+ *     mpt_raid_phys_disk_pg1 - returns phys disk page 1
+ *     @ioc: Pointer to an Adapter Structure
+ *     @phys_disk_num: io unit unique phys disk num generated by the ioc
+ *     @phys_disk: requested payload data returned
+ *
+ *     Return:
+ *     0 on success
+ *     -EFAULT if the config page read fails or the page length is zero
+ *     -ENOMEM if pci_alloc failed
+ **/
+int
+mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+               RaidPhysDiskPage1_t *phys_disk)
+{
+       CONFIGPARMS                     cfg;
+       ConfigPageHeader_t              hdr;
+       dma_addr_t                      dma_handle;
+       pRaidPhysDiskPage1_t            buffer = NULL;
+       int                             rc;
+       int                             i;
+       __le64                          sas_address;
+
+       memset(&cfg, 0 , sizeof(CONFIGPARMS));
+       memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+       rc = 0;
+
+       hdr.PageVersion = MPI_RAIDPHYSDISKPAGE1_PAGEVERSION;
+       hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_PHYSDISK;
+       hdr.PageNumber = 1;
+       cfg.cfghdr.hdr = &hdr;
+       cfg.physAddr = -1;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+
+       if (mpt_config(ioc, &cfg) != 0) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       if (!hdr.PageLength) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+           &dma_handle);
+
+       if (!buffer) {
+               rc = -ENOMEM;
+               goto out;
+       }
+
+       cfg.physAddr = dma_handle;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+       cfg.pageAddr = phys_disk_num;
+
+       if (mpt_config(ioc, &cfg) != 0) {
+               rc = -EFAULT;
+               goto out;
+       }
+
+       phys_disk->NumPhysDiskPaths = buffer->NumPhysDiskPaths;
+       phys_disk->PhysDiskNum = phys_disk_num;
+       for (i = 0; i < phys_disk->NumPhysDiskPaths; i++) {
+               phys_disk->Path[i].PhysDiskID = buffer->Path[i].PhysDiskID;
+               phys_disk->Path[i].PhysDiskBus = buffer->Path[i].PhysDiskBus;
+               phys_disk->Path[i].OwnerIdentifier =
+                               buffer->Path[i].OwnerIdentifier;
+               phys_disk->Path[i].Flags = le16_to_cpu(buffer->Path[i].Flags);
+               memcpy(&sas_address, &buffer->Path[i].WWID, sizeof(__le64));
+               sas_address = le64_to_cpu(sas_address);
+               memcpy(&phys_disk->Path[i].WWID, &sas_address, sizeof(__le64));
+               memcpy(&sas_address,
+                               &buffer->Path[i].OwnerWWID, sizeof(__le64));
+               sas_address = le64_to_cpu(sas_address);
+               memcpy(&phys_disk->Path[i].OwnerWWID,
+                               &sas_address, sizeof(__le64));
+       }
+
+ out:
+
+       if (buffer)
+               pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+                   dma_handle);
+
+       return rc;
+}
+EXPORT_SYMBOL(mpt_raid_phys_disk_pg1);
+
+
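The two exports are designed to be used as a pair by the transport consumers (mptsas, outside this diff): query the path count, size a RaidPhysDiskPage1_t with that many Path[] entries, then fetch the page. A plausible caller sketch; the allocation arithmetic assumes the MPI page declares a single Path[] element, as such variable-length pages conventionally do:

    RaidPhysDiskPage1_t *pg1;
    int i, num_paths, sz;

    num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
    if (!num_paths)
        return;

    sz = offsetof(RaidPhysDiskPage1_t, Path) +
        num_paths * sizeof(pg1->Path[0]);
    pg1 = kzalloc(sz, GFP_KERNEL);
    if (!pg1)
        return;

    if (!mpt_raid_phys_disk_pg1(ioc, phys_disk_num, pg1))
        for (i = 0; i < pg1->NumPhysDiskPaths; i++)
            printk(KERN_DEBUG "path %d: id=%d bus=%d flags=0x%04x\n",
                i, pg1->Path[i].PhysDiskID, pg1->Path[i].PhysDiskBus,
                pg1->Path[i].Flags);
    kfree(pg1);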
 /**
  *     mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
 *     @ioc: Pointer to an Adapter Structure
@@ -5775,30 +6242,28 @@ mpt_get_manufacturing_pg_0(MPT_ADAPTER *ioc)
  *     SendEventNotification - Send EventNotification (on or off) request to adapter
  *     @ioc: Pointer to MPT_ADAPTER structure
  *     @EvSwitch: Event switch flags
+ *     @sleepFlag: Specifies whether the process can sleep
  */
 static int
-SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
+SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch, int sleepFlag)
 {
-       EventNotification_t     *evnp;
+       EventNotification_t     evn;
+       MPIDefaultReply_t       reply_buf;
 
-       evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
-       if (evnp == NULL) {
-               devtverboseprintk(ioc, printk(MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
-                               ioc->name));
-               return 0;
-       }
-       memset(evnp, 0, sizeof(*evnp));
+       memset(&evn, 0, sizeof(EventNotification_t));
+       memset(&reply_buf, 0, sizeof(MPIDefaultReply_t));
 
-       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending EventNotification (%d) request %p\n", ioc->name, EvSwitch, evnp));
+       evn.Function = MPI_FUNCTION_EVENT_NOTIFICATION;
+       evn.Switch = EvSwitch;
+       evn.MsgContext = cpu_to_le32(mpt_base_index << 16);
 
-       evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
-       evnp->ChainOffset = 0;
-       evnp->MsgFlags = 0;
-       evnp->Switch = EvSwitch;
-
-       mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp);
+       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "Sending EventNotification (%d) request %p\n",
+           ioc->name, EvSwitch, &evn));
 
-       return 0;
+       return mpt_handshake_req_reply_wait(ioc, sizeof(EventNotification_t),
+           (u32 *)&evn, sizeof(MPIDefaultReply_t), (u16 *)&reply_buf, 30,
+           sleepFlag);
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -5814,7 +6279,7 @@ SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
 
        if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
                dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
-                   ioc->name,__func__));
+                   ioc->name, __func__));
                return -1;
        }
 
@@ -5851,12 +6316,19 @@ int
 mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
 {
        Config_t        *pReq;
+       ConfigReply_t   *pReply;
        ConfigExtendedPageHeader_t  *pExtHdr = NULL;
        MPT_FRAME_HDR   *mf;
-       unsigned long    flags;
-       int              ii, rc;
+       int              ii;
        int              flagsLength;
-       int              in_isr;
+       long             timeout;
+       int              ret;
+       u8               page_type = 0, extend_page;
+       unsigned long    timeleft;
+       unsigned long    flags;
+       int              in_isr;
+       u8               issue_hard_reset = 0;
+       u8               retry_count = 0;
 
        /*      Prevent calling wait_event() (below), if caller happens
         *      to be in ISR context, because that is fatal!
@@ -5866,15 +6338,43 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
                dcprintk(ioc, printk(MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
                                ioc->name));
                return -EPERM;
+    }
+
+       /* don't send a config page during diag reset */
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress) {
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: busy with host reset\n", ioc->name, __func__));
+               spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+               return -EBUSY;
+       }
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+       /* don't send if no chance of success */
+       if (!ioc->active ||
+           mpt_GetIocState(ioc, 1) != MPI_IOC_STATE_OPERATIONAL) {
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: ioc not operational, %d, %xh\n",
+                   ioc->name, __func__, ioc->active,
+                   mpt_GetIocState(ioc, 0)));
+               return -EFAULT;
        }
 
+ retry_config:
+       mutex_lock(&ioc->mptbase_cmds.mutex);
+       /* init the internal cmd struct */
+       memset(ioc->mptbase_cmds.reply, 0 , MPT_DEFAULT_FRAME_SIZE);
+       INITIALIZE_MGMT_STATUS(ioc->mptbase_cmds.status)
+
        /* Get and Populate a free Frame
         */
        if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
-               dcprintk(ioc, printk(MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n",
-                               ioc->name));
-               return -EAGAIN;
+               dcprintk(ioc, printk(MYIOC_s_WARN_FMT
+                   "mpt_config: no msg frames!\n", ioc->name));
+               ret = -EAGAIN;
+               goto out;
        }
+
        pReq = (Config_t *)mf;
        pReq->Action = pCfg->action;
        pReq->Reserved = 0;
@@ -5900,7 +6400,9 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
                pReq->ExtPageType = pExtHdr->ExtPageType;
                pReq->Header.PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 
-               /* Page Length must be treated as a reserved field for the extended header. */
+               /* Page Length must be treated as a reserved field for the
+                * extended header.
+                */
                pReq->Header.PageLength = 0;
        }
 
@@ -5913,78 +6415,91 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
        else
                flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
 
-       if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) == MPI_CONFIG_PAGETYPE_EXTENDED) {
+       if ((pCfg->cfghdr.hdr->PageType & MPI_CONFIG_PAGETYPE_MASK) ==
+           MPI_CONFIG_PAGETYPE_EXTENDED) {
                flagsLength |= pExtHdr->ExtPageLength * 4;
-
-               dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n",
-                       ioc->name, pReq->ExtPageType, pReq->Header.PageNumber, pReq->Action));
-       }
-       else {
+               page_type = pReq->ExtPageType;
+               extend_page = 1;
+       } else {
                flagsLength |= pCfg->cfghdr.hdr->PageLength * 4;
-
-               dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Config request type %d, page %d and action %d\n",
-                       ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
+               page_type = pReq->Header.PageType;
+               extend_page = 0;
        }
 
-       mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
-
-       /* Append pCfg pointer to end of mf
-        */
-       *((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) =  (void *) pCfg;
+       dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "Sending Config request type 0x%x, page 0x%x and action %d\n",
+           ioc->name, page_type, pReq->Header.PageNumber, pReq->Action));
 
-       /* Initalize the timer
-        */
-       init_timer_on_stack(&pCfg->timer);
-       pCfg->timer.data = (unsigned long) ioc;
-       pCfg->timer.function = mpt_timer_expired;
-       pCfg->wait_done = 0;
-
-       /* Set the timer; ensure 10 second minimum */
-       if (pCfg->timeout < 10)
-               pCfg->timer.expires = jiffies + HZ*10;
-       else
-               pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
-
-       /* Add to end of Q, set timer and then issue this command */
-       spin_lock_irqsave(&ioc->FreeQlock, flags);
-       list_add_tail(&pCfg->linkage, &ioc->configQ);
-       spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-       add_timer(&pCfg->timer);
+       ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
+       timeout = (pCfg->timeout < 15) ? HZ*15 : HZ*pCfg->timeout;
        mpt_put_msg_frame(mpt_base_index, ioc, mf);
-       wait_event(mpt_waitq, pCfg->wait_done);
-
-       /* mf has been freed - do not access */
+       timeleft = wait_for_completion_timeout(&ioc->mptbase_cmds.done,
+               timeout);
+       if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               ret = -ETIME;
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "Failed Sending Config request type 0x%x, page 0x%x,"
+                   " action %d, status %xh, time left %ld\n",
+                   ioc->name, page_type, pReq->Header.PageNumber,
+                   pReq->Action, ioc->mptbase_cmds.status, timeleft));
+               if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+                       goto out;
+               if (!timeleft)
+                       issue_hard_reset = 1;
+               goto out;
+       }
 
-       rc = pCfg->status;
+       if (!(ioc->mptbase_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+               ret = -1;
+               goto out;
+       }
+       pReply = (ConfigReply_t *)ioc->mptbase_cmds.reply;
+       ret = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+       if (ret == MPI_IOCSTATUS_SUCCESS) {
+               if (extend_page) {
+                       pCfg->cfghdr.ehdr->ExtPageLength =
+                           le16_to_cpu(pReply->ExtPageLength);
+                       pCfg->cfghdr.ehdr->ExtPageType =
+                           pReply->ExtPageType;
+               }
+               pCfg->cfghdr.hdr->PageVersion = pReply->Header.PageVersion;
+               pCfg->cfghdr.hdr->PageLength = pReply->Header.PageLength;
+               pCfg->cfghdr.hdr->PageNumber = pReply->Header.PageNumber;
+               pCfg->cfghdr.hdr->PageType = pReply->Header.PageType;
 
-       return rc;
-}
+       }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *     mpt_timer_expired - Callback for timer process.
- *     Used only internal config functionality.
- *     @data: Pointer to MPT_SCSI_HOST recast as an unsigned long
- */
-static void
-mpt_timer_expired(unsigned long data)
-{
-       MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
+       if (retry_count)
+               printk(MYIOC_s_INFO_FMT "Retry completed "
+                   "ret=0x%x timeleft=%ld\n",
+                   ioc->name, ret, timeleft);
 
-       dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired! \n", ioc->name));
+       dcprintk(ioc, printk(KERN_DEBUG "IOCStatus=%04xh, IOCLogInfo=%08xh\n",
+            ret, le32_to_cpu(pReply->IOCLogInfo)));
 
-       /* Perform a FW reload */
-       if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
-               printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
+out:
 
-       /* No more processing.
-        * Hard reset clean-up will wake up
-        * process and free all resources.
-        */
-       dcprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mpt_timer_expired complete!\n", ioc->name));
+       CLEAR_MGMT_STATUS(ioc->mptbase_cmds.status)
+       mutex_unlock(&ioc->mptbase_cmds.mutex);
+       if (issue_hard_reset) {
+               issue_hard_reset = 0;
+               printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+                   ioc->name, __func__);
+               mpt_HardResetHandler(ioc, CAN_SLEEP);
+               mpt_free_msg_frame(ioc, mf);
+               /* attempt one retry for a timed out command */
+               if (!retry_count) {
+                       printk(MYIOC_s_INFO_FMT
+                           "Attempting Retry Config request"
+                           " type 0x%x, page 0x%x,"
+                           " action %d\n", ioc->name, page_type,
+                           pCfg->cfghdr.hdr->PageNumber, pCfg->action);
+                       retry_count++;
+                       goto retry_config;
+               }
+       }
+       return ret;
 
-       return;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
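
Note the changed return contract: mpt_config() now returns the masked IOCStatus from the reply (0 is MPI_IOCSTATUS_SUCCESS) when the exchange completes, and a negative errno when it never does, so callers must distinguish the two ranges. A sketch:

    rc = mpt_config(ioc, &cfg);
    if (rc < 0) {
        /* transport-level failure: -EPERM (ISR context), -EBUSY
         * (host reset in flight), -EFAULT (IOC not operational),
         * -EAGAIN (no message frames), -ETIME (no reply in time) */
        return rc;
    }
    if (rc != MPI_IOCSTATUS_SUCCESS)
        return -EINVAL;     /* firmware rejected the page request */
    /* on success, cfg.cfghdr carries the reply's header fields */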
@@ -5998,41 +6513,34 @@ mpt_timer_expired(unsigned long data)
 static int
 mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 {
-       CONFIGPARMS *pCfg;
-       unsigned long flags;
-
-       dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-           ": IOC %s_reset routed to MPT base driver!\n",
-           ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
-           reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
-       if (reset_phase == MPT_IOC_SETUP_RESET) {
-               ;
-       } else if (reset_phase == MPT_IOC_PRE_RESET) {
-               /* If the internal config Q is not empty -
-                * delete timer. MF resources will be freed when
-                * the FIFO's are primed.
-                */
-               spin_lock_irqsave(&ioc->FreeQlock, flags);
-               list_for_each_entry(pCfg, &ioc->configQ, linkage)
-                       del_timer(&pCfg->timer);
-               spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-       } else {
-               CONFIGPARMS *pNext;
-
-               /* Search the configQ for internal commands.
-                * Flush the Q, and wake up all suspended threads.
-                */
-               spin_lock_irqsave(&ioc->FreeQlock, flags);
-               list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
-                       list_del(&pCfg->linkage);
-
-                       pCfg->status = MPT_CONFIG_ERROR;
-                       pCfg->wait_done = 1;
-                       wake_up(&mpt_waitq);
+       switch (reset_phase) {
+       case MPT_IOC_SETUP_RESET:
+               ioc->taskmgmt_quiesce_io = 1;
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+               break;
+       case MPT_IOC_PRE_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+               break;
+       case MPT_IOC_POST_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_POST_RESET\n",  ioc->name, __func__));
+               /* wake up mptbase_cmds */
+               if (ioc->mptbase_cmds.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->mptbase_cmds.status |=
+                           MPT_MGMT_STATUS_DID_IOCRESET;
+                       complete(&ioc->mptbase_cmds.done);
                }
-               spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+               /* wake up taskmgmt_cmds */
+               if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->taskmgmt_cmds.status |=
+                               MPT_MGMT_STATUS_DID_IOCRESET;
+                       complete(&ioc->taskmgmt_cmds.done);
+               }
+               break;
+       default:
+               break;
        }
 
        return 1;               /* currently means nothing really */
@@ -6344,6 +6852,59 @@ mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int sh
 
        *size = y;
 }
+/**
+ *     mpt_set_taskmgmt_in_progress_flag - set flags associated with task management
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ *     Returns 0 for SUCCESS or -1 if FAILED.
+ *
+ *     If -1 is returned, then it was not possible to set the flags
+ **/
+int
+mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
+{
+       unsigned long    flags;
+       int              retval;
+
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress ||
+           (ioc->alt_ioc && ioc->alt_ioc->taskmgmt_in_progress)) {
+               retval = -1;
+               goto out;
+       }
+       retval = 0;
+       ioc->taskmgmt_in_progress = 1;
+       ioc->taskmgmt_quiesce_io = 1;
+       if (ioc->alt_ioc) {
+               ioc->alt_ioc->taskmgmt_in_progress = 1;
+               ioc->alt_ioc->taskmgmt_quiesce_io = 1;
+       }
+ out:
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+       return retval;
+}
+EXPORT_SYMBOL(mpt_set_taskmgmt_in_progress_flag);
+
+/**
+ *     mpt_clear_taskmgmt_in_progress_flag - clear flags associated with task management
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+void
+mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
+{
+       unsigned long    flags;
+
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       ioc->taskmgmt_in_progress = 0;
+       ioc->taskmgmt_quiesce_io = 0;
+       if (ioc->alt_ioc) {
+               ioc->alt_ioc->taskmgmt_in_progress = 0;
+               ioc->alt_ioc->taskmgmt_quiesce_io = 0;
+       }
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+}
+EXPORT_SYMBOL(mpt_clear_taskmgmt_in_progress_flag);
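
Both exports exist so a protocol driver's task-management path (mptscsih, outside this diff) can claim TM exclusivity across both functions of a dual-port adapter; a plausible caller shape:

    if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
        return FAILED;      /* reset or another TM already in flight */

    /* ... build, send and wait for the task management request ... */

    mpt_clear_taskmgmt_in_progress_flag(ioc);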
 
 
 /**
@@ -6397,7 +6958,9 @@ int
 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 {
        int              rc;
+       u8       cb_idx;
        unsigned long    flags;
+       unsigned long    time_count;
 
        dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler Entered!\n", ioc->name));
 #ifdef MFCNT
@@ -6410,14 +6973,15 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
        /* Reset the adapter. Prevent more than 1 call to
         * mpt_do_ioc_recovery at any instant in time.
         */
-       spin_lock_irqsave(&ioc->diagLock, flags);
-       if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){
-               spin_unlock_irqrestore(&ioc->diagLock, flags);
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress) {
+               spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
                return 0;
-       } else {
-               ioc->diagPending = 1;
        }
-       spin_unlock_irqrestore(&ioc->diagLock, flags);
+       ioc->ioc_reset_in_progress = 1;
+       if (ioc->alt_ioc)
+               ioc->alt_ioc->ioc_reset_in_progress = 1;
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
        /* FIXME: If do_ioc_recovery fails, repeat....
         */
@@ -6427,47 +6991,57 @@ mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
         * Prevents timeouts occurring during a diagnostic reset...very bad.
         * For all other protocol drivers, this is a no-op.
         */
-       {
-               u8       cb_idx;
-               int      r = 0;
-
-               for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
-                       if (MptResetHandlers[cb_idx]) {
-                               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling IOC reset_setup handler #%d\n",
-                                               ioc->name, cb_idx));
-                               r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
-                               if (ioc->alt_ioc) {
-                                       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling alt-%s setup reset handler #%d\n",
-                                                       ioc->name, ioc->alt_ioc->name, cb_idx));
-                                       r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_SETUP_RESET);
-                               }
-                       }
+       for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+               if (MptResetHandlers[cb_idx]) {
+                       mpt_signal_reset(cb_idx, ioc, MPT_IOC_SETUP_RESET);
+                       if (ioc->alt_ioc)
+                               mpt_signal_reset(cb_idx, ioc->alt_ioc,
+                                       MPT_IOC_SETUP_RESET);
                }
        }
 
-       if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) {
-               printk(MYIOC_s_WARN_FMT "Cannot recover rc = %d!\n", ioc->name, rc);
+       time_count = jiffies;
+       rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag);
+       if (rc != 0) {
+               printk(KERN_WARNING MYNAM
+                   ": WARNING - (%d) Cannot recover %s\n", rc, ioc->name);
+       } else {
+               if (ioc->hard_resets < -1)
+                       ioc->hard_resets++;
        }
-       ioc->reload_fw = 0;
-       if (ioc->alt_ioc)
-               ioc->alt_ioc->reload_fw = 0;
 
-       spin_lock_irqsave(&ioc->diagLock, flags);
-       ioc->diagPending = 0;
-       if (ioc->alt_ioc)
-               ioc->alt_ioc->diagPending = 0;
-       spin_unlock_irqrestore(&ioc->diagLock, flags);
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       ioc->ioc_reset_in_progress = 0;
+       ioc->taskmgmt_quiesce_io = 0;
+       ioc->taskmgmt_in_progress = 0;
+       if (ioc->alt_ioc) {
+               ioc->alt_ioc->ioc_reset_in_progress = 0;
+               ioc->alt_ioc->taskmgmt_quiesce_io = 0;
+               ioc->alt_ioc->taskmgmt_in_progress = 0;
+       }
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "HardResetHandler rc = %d!\n", ioc->name, rc));
+       dtmprintk(ioc,
+           printk(MYIOC_s_DEBUG_FMT
+               "HardResetHandler: completed (%d seconds): %s\n", ioc->name,
+               jiffies_to_msecs(jiffies - time_count)/1000, ((rc == 0) ?
+               "SUCCESS" : "FAILED")));
 
        return rc;
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#ifdef CONFIG_FUSION_LOGGING
 static void
-EventDescriptionStr(u8 event, u32 evData0, char *evStr)
+mpt_display_event_info(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply)
 {
        char *ds = NULL;
+       u32 evData0;
+       int ii;
+       u8 event;
+       char *evStr = ioc->evStr;
+
+       event = le32_to_cpu(pEventReply->Event) & 0xFF;
+       evData0 = le32_to_cpu(pEventReply->Data[0]);
 
        switch(event) {
        case MPI_EVENT_NONE:
@@ -6501,9 +7075,9 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
                if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
                        ds = "Loop State(LIP) Change";
                else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
-                       ds = "Loop State(LPE) Change";          /* ??? */
+                       ds = "Loop State(LPE) Change";
                else
-                       ds = "Loop State(LPB) Change";          /* ??? */
+                       ds = "Loop State(LPB) Change";
                break;
        case MPI_EVENT_LOGOUT:
                ds = "Logout";
@@ -6703,28 +7277,65 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
        }
        case MPI_EVENT_IR2:
        {
+               u8 id = (u8)(evData0);
+               u8 channel = (u8)(evData0 >> 8);
+               u8 phys_num = (u8)(evData0 >> 24);
                u8 ReasonCode = (u8)(evData0 >> 16);
+
                switch (ReasonCode) {
                case MPI_EVENT_IR2_RC_LD_STATE_CHANGED:
-                       ds = "IR2: LD State Changed";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: LD State Changed: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                case MPI_EVENT_IR2_RC_PD_STATE_CHANGED:
-                       ds = "IR2: PD State Changed";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: PD State Changed "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                case MPI_EVENT_IR2_RC_BAD_BLOCK_TABLE_FULL:
-                       ds = "IR2: Bad Block Table Full";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: Bad Block Table Full: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                case MPI_EVENT_IR2_RC_PD_INSERTED:
-                       ds = "IR2: PD Inserted";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: PD Inserted: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                case MPI_EVENT_IR2_RC_PD_REMOVED:
-                       ds = "IR2: PD Removed";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: PD Removed: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
-                       ds = "IR2: Foreign CFG Detected";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: Foreign CFG Detected: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                case MPI_EVENT_IR2_RC_REBUILD_MEDIUM_ERROR:
-                       ds = "IR2: Rebuild Medium Error";
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: Rebuild Medium Error: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
+                       break;
+               case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: Dual Port Added: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
+                       break;
+               case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+                       snprintf(evStr, EVENT_DESCR_STR_SZ,
+                           "IR2: Dual Port Removed: "
+                           "id=%d channel=%d phys_num=%d",
+                           id, channel, phys_num);
                        break;
                default:
                        ds = "IR2";
@@ -6760,13 +7371,18 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
        case MPI_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
        {
                u8 reason = (u8)(evData0);
-               u8 port_num = (u8)(evData0 >> 8);
-               u16 handle = le16_to_cpu(evData0 >> 16);
 
-               snprintf(evStr, EVENT_DESCR_STR_SZ,
-                   "SAS Initiator Device Status Change: reason=0x%02x "
-                   "port=%d handle=0x%04x",
-                   reason, port_num, handle);
+               switch (reason) {
+               case MPI_EVENT_SAS_INIT_RC_ADDED:
+                       ds = "SAS Initiator Status Change: Added";
+                       break;
+               case MPI_EVENT_SAS_INIT_RC_REMOVED:
+                       ds = "SAS Initiator Status Change: Deleted";
+                       break;
+               default:
+                       ds = "SAS Initiator Status Change";
+                       break;
+               }
                break;
        }
 
@@ -6814,6 +7430,24 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
                break;
        }
 
+       case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+       {
+               u8 reason = (u8)(evData0);
+
+               switch (reason) {
+               case MPI_EVENT_SAS_EXP_RC_ADDED:
+                       ds = "Expander Status Change: Added";
+                       break;
+               case MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING:
+                       ds = "Expander Status Change: Deleted";
+                       break;
+               default:
+                       ds = "Expander Status Change";
+                       break;
+               }
+               break;
+       }
+
        /*
         *  MPT base "custom" events may be added here...
         */
@@ -6823,8 +7457,20 @@ EventDescriptionStr(u8 event, u32 evData0, char *evStr)
        }
        if (ds)
                strncpy(evStr, ds, EVENT_DESCR_STR_SZ);
-}
 
+
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "MPT event:(%02Xh) : %s\n",
+           ioc->name, event, evStr));
+
+       devtverboseprintk(ioc, printk(KERN_DEBUG MYNAM
+           ": Event data:\n"));
+       for (ii = 0; ii < le16_to_cpu(pEventReply->EventDataLength); ii++)
+               devtverboseprintk(ioc, printk(" %08x",
+                   le32_to_cpu(pEventReply->Data[ii])));
+       devtverboseprintk(ioc, printk(KERN_DEBUG "\n"));
+}
+#endif
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
  *     ProcessEventNotification - Route EventNotificationReply to all event handlers
@@ -6841,37 +7487,24 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
 {
        u16 evDataLen;
        u32 evData0 = 0;
-//     u32 evCtx;
        int ii;
        u8 cb_idx;
        int r = 0;
        int handlers = 0;
-       char evStr[EVENT_DESCR_STR_SZ];
        u8 event;
 
        /*
         *  Do platform normalization of values
         */
        event = le32_to_cpu(pEventReply->Event) & 0xFF;
-//     evCtx = le32_to_cpu(pEventReply->EventContext);
        evDataLen = le16_to_cpu(pEventReply->EventDataLength);
        if (evDataLen) {
                evData0 = le32_to_cpu(pEventReply->Data[0]);
        }
 
-       EventDescriptionStr(event, evData0, evStr);
-       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event:(%02Xh) : %s\n",
-                       ioc->name,
-                       event,
-                       evStr));
-
 #ifdef CONFIG_FUSION_LOGGING
-       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-           ": Event data:\n", ioc->name));
-       for (ii = 0; ii < evDataLen; ii++)
-               devtverboseprintk(ioc, printk(" %08x",
-                   le32_to_cpu(pEventReply->Data[ii])));
-       devtverboseprintk(ioc, printk("\n"));
+       if (evDataLen)
+               mpt_display_event_info(ioc, pEventReply);
 #endif
 
        /*
@@ -6926,8 +7559,9 @@ ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply
         */
        for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
                if (MptEvHandlers[cb_idx]) {
-                       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Routing Event to event handler #%d\n",
-                                       ioc->name, cb_idx));
+                       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                           "Routing Event to event handler #%d\n",
+                           ioc->name, cb_idx));
                        r += (*(MptEvHandlers[cb_idx]))(ioc, pEventReply);
                        handlers++;
                }
@@ -7011,8 +7645,6 @@ mpt_spi_log_info(MPT_ADAPTER *ioc, u32 log_info)
        switch (info) {
        case 0x00010000:
                desc = "bug! MID not found";
-               if (ioc->reload_fw == 0)
-                       ioc->reload_fw++;
                break;
 
        case 0x00020000:
@@ -7613,7 +8245,6 @@ EXPORT_SYMBOL(mpt_get_msg_frame);
 EXPORT_SYMBOL(mpt_put_msg_frame);
 EXPORT_SYMBOL(mpt_put_msg_frame_hi_pri);
 EXPORT_SYMBOL(mpt_free_msg_frame);
-EXPORT_SYMBOL(mpt_add_sge);
 EXPORT_SYMBOL(mpt_send_handshake_request);
 EXPORT_SYMBOL(mpt_verify_adapter);
 EXPORT_SYMBOL(mpt_GetIocState);
@@ -7650,7 +8281,7 @@ fusion_init(void)
        /*  Register ourselves (mptbase) in order to facilitate
         *  EventNotification handling.
         */
-       mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER);
+       mpt_base_index = mpt_register(mptbase_reply, MPTBASE_DRIVER);
 
        /* Register for hard reset handling callbacks.
         */
index b3e981d2a506fd6aeabd27795e7b492eb466344a..1c8514dc31ca3ec51df54ca2a9711f6fb085b02f 100644 (file)
@@ -76,8 +76,8 @@
 #define COPYRIGHT      "Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON       "3.04.07"
-#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.07"
+#define MPT_LINUX_VERSION_COMMON       "3.04.10"
+#define MPT_LINUX_PACKAGE_NAME         "@(#)mptlinux-3.04.10"
 #define WHAT_MAGIC_STRING              "@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
 #endif
 
 #define MPT_NAME_LENGTH                        32
+#define MPT_KOBJ_NAME_LEN              20
 
 #define MPT_PROCFS_MPTBASEDIR          "mpt"
                                                /* chg it to "driver/fusion" ? */
 
 #define MPT_COALESCING_TIMEOUT         0x10
 
+
 /*
  * SCSI transfer rate defines.
  */
 /*
  * Set the MAX_SGE value based on user input.
  */
-#ifdef  CONFIG_FUSION_MAX_SGE
-#if     CONFIG_FUSION_MAX_SGE  < 16
+#ifdef CONFIG_FUSION_MAX_SGE
+#if CONFIG_FUSION_MAX_SGE  < 16
 #define MPT_SCSI_SG_DEPTH      16
-#elif   CONFIG_FUSION_MAX_SGE  > 128
+#elif CONFIG_FUSION_MAX_SGE  > 128
 #define MPT_SCSI_SG_DEPTH      128
 #else
 #define MPT_SCSI_SG_DEPTH      CONFIG_FUSION_MAX_SGE
 #define MPT_SCSI_SG_DEPTH      40
 #endif
 
+#ifdef CONFIG_FUSION_MAX_FC_SGE
+#if CONFIG_FUSION_MAX_FC_SGE  < 16
+#define MPT_SCSI_FC_SG_DEPTH   16
+#elif CONFIG_FUSION_MAX_FC_SGE  > 256
+#define MPT_SCSI_FC_SG_DEPTH   256
+#else
+#define MPT_SCSI_FC_SG_DEPTH   CONFIG_FUSION_MAX_FC_SGE
+#endif
+#else
+#define MPT_SCSI_FC_SG_DEPTH   40
+#endif
+
 /* debug print string length used for events and iocstatus */
 # define EVENT_DESCR_STR_SZ             100
 
@@ -431,38 +445,36 @@ do { \
  *     IOCTL structure and associated defines
  */
 
-#define MPT_IOCTL_STATUS_DID_IOCRESET  0x01    /* IOC Reset occurred on the current*/
-#define MPT_IOCTL_STATUS_RF_VALID      0x02    /* The Reply Frame is VALID */
-#define MPT_IOCTL_STATUS_TIMER_ACTIVE  0x04    /* The timer is running */
-#define MPT_IOCTL_STATUS_SENSE_VALID   0x08    /* Sense data is valid */
-#define MPT_IOCTL_STATUS_COMMAND_GOOD  0x10    /* Command Status GOOD */
-#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE        0x20    /* The TM timer is running */
-#define MPT_IOCTL_STATUS_TM_FAILED     0x40    /* User TM request failed */
-
 #define MPTCTL_RESET_OK                        0x01    /* Issue Bus Reset */
 
-typedef struct _MPT_IOCTL {
-       struct _MPT_ADAPTER     *ioc;
-       u8                       ReplyFrame[MPT_DEFAULT_FRAME_SIZE];    /* reply frame data */
-       u8                       sense[MPT_SENSE_BUFFER_ALLOC];
-       int                      wait_done;     /* wake-up value for this ioc */
-       u8                       rsvd;
-       u8                       status;        /* current command status */
-       u8                       reset;         /* 1 if bus reset allowed */
-       u8                       id;            /* target for reset */
-       struct mutex             ioctl_mutex;
-} MPT_IOCTL;
-
-#define MPT_SAS_MGMT_STATUS_RF_VALID   0x02    /* The Reply Frame is VALID */
-#define MPT_SAS_MGMT_STATUS_COMMAND_GOOD       0x10    /* Command Status GOOD */
-#define MPT_SAS_MGMT_STATUS_TM_FAILED  0x40    /* User TM request failed */
-
-typedef struct _MPT_SAS_MGMT {
+#define MPT_MGMT_STATUS_RF_VALID       0x01    /* The Reply Frame is VALID */
+#define MPT_MGMT_STATUS_COMMAND_GOOD   0x02    /* Command Status GOOD */
+#define MPT_MGMT_STATUS_PENDING                0x04    /* command is pending */
+#define MPT_MGMT_STATUS_DID_IOCRESET   0x08    /* IOC reset occurred
+                                                  on the current cmd */
+#define MPT_MGMT_STATUS_SENSE_VALID    0x10    /* valid sense info */
+#define MPT_MGMT_STATUS_TIMER_ACTIVE   0x20    /* obsolete */
+#define MPT_MGMT_STATUS_FREE_MF                0x40    /* free the mf from
+                                                  complete routine */
+
+#define INITIALIZE_MGMT_STATUS(status) \
+       status = MPT_MGMT_STATUS_PENDING;
+#define CLEAR_MGMT_STATUS(status) \
+       status = 0;
+#define CLEAR_MGMT_PENDING_STATUS(status) \
+       status &= ~MPT_MGMT_STATUS_PENDING;
+#define SET_MGMT_MSG_CONTEXT(msg_context, value) \
+       msg_context = value;
+
+typedef struct _MPT_MGMT {
        struct mutex             mutex;
        struct completion        done;
        u8                       reply[MPT_DEFAULT_FRAME_SIZE]; /* reply frame data */
+       u8                       sense[MPT_SENSE_BUFFER_ALLOC];
        u8                       status;        /* current command status */
-}MPT_SAS_MGMT;
+       int                      completion_code;
+       u32                      msg_context;
+} MPT_MGMT;
 
 /*
  *  Event Structure and define
@@ -564,6 +576,10 @@ struct mptfc_rport_info
        u8              flags;
 };
 
+typedef void (*MPT_ADD_SGE)(void *pAddr, u32 flagslength, dma_addr_t dma_addr);
+typedef void (*MPT_ADD_CHAIN)(void *pAddr, u8 next, u16 length,
+               dma_addr_t dma_addr);
+
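These typedefs declare the per-adapter ioc->add_sge / ioc->add_chain dispatch used in mpt_do_upload() and mpt_config() above: the 32- vs 64-bit SGE writer is selected once at bring-up instead of compiled in via sizeof(dma_addr_t). A sketch of a matching 32-bit writer and the binding (the 64-bit variant's name and the binding site are illustrative):

    /* a 32-bit simple-SGE writer with the MPT_ADD_SGE signature */
    static void mpt_add_sge_32bit(void *pAddr, u32 flagslength,
        dma_addr_t dma_addr)
    {
        SGESimple32_t *pSge = (SGESimple32_t *) pAddr;

        pSge->FlagsLength = cpu_to_le32(flagslength);
        pSge->Address = cpu_to_le32(dma_addr); /* fits under a 32-bit mask */
    }

    /* bound once per adapter during bring-up */
    ioc->add_sge = (ioc->sg_addr_size == sizeof(u64)) ?
        mpt_add_sge_64bit : mpt_add_sge_32bit;
    ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;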
 /*
  *  Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
  */
@@ -573,6 +589,10 @@ typedef struct _MPT_ADAPTER
        int                      pci_irq;       /* This irq           */
        char                     name[MPT_NAME_LENGTH]; /* "iocN"             */
        char                     prod_name[MPT_NAME_LENGTH];    /* "LSIFC9x9"         */
+#ifdef CONFIG_FUSION_LOGGING
+       /* used in mpt_display_event_info */
+       char                     evStr[EVENT_DESCR_STR_SZ];
+#endif
        char                     board_name[16];
        char                     board_assembly[16];
        char                     board_tracer[16];
@@ -600,6 +620,10 @@ typedef struct _MPT_ADAPTER
        int                      reply_depth;   /* Num Allocated reply frames */
        int                      reply_sz;      /* Reply frame size */
        int                      num_chain;     /* Number of chain buffers */
+       MPT_ADD_SGE              add_sge;       /* Pointer to add_sge
+                                                  function */
+       MPT_ADD_CHAIN            add_chain;     /* Pointer to add_chain
+                                                  function */
                /* Pool of buffers for chaining. ReqToChain
                 * and ChainToChain track index of chain buffers.
                 * ChainBuffer (DMA) virt/phys addresses.
@@ -640,11 +664,8 @@ typedef struct _MPT_ADAPTER
        RaidCfgData             raid_data;      /* Raid config. data */
        SasCfgData              sas_data;       /* Sas config. data */
        FcCfgData               fc_data;        /* Fc config. data */
-       MPT_IOCTL               *ioctl;         /* ioctl data pointer */
        struct proc_dir_entry   *ioc_dentry;
        struct _MPT_ADAPTER     *alt_ioc;       /* ptr to 929 bound adapter port */
-       spinlock_t               diagLock;      /* diagnostic reset lock */
-       int                      diagPending;
        u32                      biosVersion;   /* BIOS version from IO Unit Page 2 */
        int                      eventTypes;    /* Event logging parameters */
        int                      eventContext;  /* Next event context */
@@ -652,7 +673,6 @@ typedef struct _MPT_ADAPTER
        struct _mpt_ioctl_events *events;       /* pointer to event log */
        u8                      *cached_fw;     /* Pointer to FW */
        dma_addr_t              cached_fw_dma;
-       struct list_head         configQ;       /* linked list of config. requests */
        int                      hs_reply_idx;
 #ifndef MFCNT
        u32                      pad0;
@@ -665,9 +685,6 @@ typedef struct _MPT_ADAPTER
        IOCFactsReply_t          facts;
        PortFactsReply_t         pfacts[2];
        FCPortPage0_t            fc_port_page0[2];
-       struct timer_list        persist_timer; /* persist table timer */
-       int                      persist_wait_done; /* persist completion flag */
-       u8                       persist_reply_frame[MPT_DEFAULT_FRAME_SIZE]; /* persist reply */
        LANPage0_t               lan_cnfg_page0;
        LANPage1_t               lan_cnfg_page1;
 
@@ -682,23 +699,44 @@ typedef struct _MPT_ADAPTER
        int                      aen_event_read_flag; /* flag to indicate event log was read*/
        u8                       FirstWhoInit;
        u8                       upload_fw;     /* If set, do a fw upload */
-       u8                       reload_fw;     /* Force a FW Reload on next reset */
        u8                       NBShiftFactor;  /* NB Shift Factor based on Block Size (Facts)  */
        u8                       pad1[4];
        u8                       DoneCtx;
        u8                       TaskCtx;
        u8                       InternalCtx;
-       spinlock_t               initializing_hba_lock;
-       int                      initializing_hba_lock_flag;
        struct list_head         list;
        struct net_device       *netdev;
        struct list_head         sas_topology;
        struct mutex             sas_topology_mutex;
+
+       struct workqueue_struct *fw_event_q;
+       struct list_head         fw_event_list;
+       spinlock_t               fw_event_lock;
+       u8                       fw_events_off; /* if '1', then ignore events */
+       char                     fw_event_q_name[MPT_KOBJ_NAME_LEN];
+
        struct mutex             sas_discovery_mutex;
        u8                       sas_discovery_runtime;
        u8                       sas_discovery_ignore_events;
+
+       /* port_info object for the host */
+       struct mptsas_portinfo  *hba_port_info;
+       u64                      hba_port_sas_addr;
+       u16                      hba_port_num_phy;
+       struct list_head         sas_device_info_list;
+       struct mutex             sas_device_info_mutex;
+       u8                       old_sas_discovery_protocal;
+       u8                       sas_discovery_quiesce_io;
        int                      sas_index; /* index referencing */
-       MPT_SAS_MGMT             sas_mgmt;
+       MPT_MGMT                 sas_mgmt;
+       MPT_MGMT                 mptbase_cmds; /* for sending config pages */
+       MPT_MGMT                 internal_cmds;
+       MPT_MGMT                 taskmgmt_cmds;
+       MPT_MGMT                 ioctl_cmds;
+       spinlock_t               taskmgmt_lock; /* diagnostic reset lock */
+       int                      taskmgmt_in_progress;
+       u8                       taskmgmt_quiesce_io;
+       u8                       ioc_reset_in_progress;
        struct work_struct       sas_persist_task;
 
        struct work_struct       fc_setup_reset_work;
@@ -707,15 +745,27 @@ typedef struct _MPT_ADAPTER
        u8                       fc_link_speed[2];
        spinlock_t               fc_rescan_work_lock;
        struct work_struct       fc_rescan_work;
-       char                     fc_rescan_work_q_name[20];
+       char                     fc_rescan_work_q_name[MPT_KOBJ_NAME_LEN];
        struct workqueue_struct *fc_rescan_work_q;
+
+       /* driver forced bus resets count */
+       unsigned long             hard_resets;
+       /* fw/external bus resets count */
+       unsigned long             soft_resets;
+       /* cmd timeouts */
+       unsigned long             timeouts;
+
        struct scsi_cmnd        **ScsiLookup;
        spinlock_t                scsi_lookup_lock;
-
-       char                     reset_work_q_name[20];
+       u64                     dma_mask;
+       u32                       broadcast_aen_busy;
+       char                     reset_work_q_name[MPT_KOBJ_NAME_LEN];
        struct workqueue_struct *reset_work_q;
        struct delayed_work      fault_reset_work;
-       spinlock_t               fault_reset_work_lock;
+
+       u8                      sg_addr_size;
+       u8                      in_rescan;
+       u8                      SGE_size;
 
 } MPT_ADAPTER;
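
The firmware-event fields added above imply a per-IOC setup step; the actual initialization lives in mptsas.c and is not part of these hunks. A sketch of what it amounts to (the "mpt/%d" queue name is an assumption for illustration):

	snprintf(ioc->fw_event_q_name, MPT_KOBJ_NAME_LEN, "mpt/%d", ioc->id);
	ioc->fw_event_q = create_singlethread_workqueue(ioc->fw_event_q_name);
	INIT_LIST_HEAD(&ioc->fw_event_list);
	spin_lock_init(&ioc->fw_event_lock);
	ioc->fw_events_off = 0;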
 
@@ -753,13 +803,14 @@ typedef struct _mpt_sge {
        dma_addr_t      Address;
 } MptSge_t;
 
-#define mpt_addr_size() \
-       ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
-               MPI_SGE_FLAGS_32_BIT_ADDRESSING)
 
-#define mpt_msg_flags() \
-       ((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
-               MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32)
+#define mpt_msg_flags(ioc) \
+       (ioc->sg_addr_size == sizeof(u64)) ?            \
+       MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 :             \
+       MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32
+
+#define MPT_SGE_FLAGS_64_BIT_ADDRESSING \
+       (MPI_SGE_FLAGS_64_BIT_ADDRESSING << MPI_SGE_FLAGS_SHIFT)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -835,22 +886,14 @@ typedef struct _MPT_SCSI_HOST {
                /* Pool of memory for holding SCpnts before doing
                 * OS callbacks. freeQ is the free pool.
                 */
-       u8                        tmPending;
-       u8                        resetPending;
        u8                        negoNvram;            /* DV disabled, nego NVRAM */
        u8                        pad1;
-       u8                        tmState;
        u8                        rsvd[2];
        MPT_FRAME_HDR            *cmdPtr;               /* Ptr to nonOS request */
        struct scsi_cmnd         *abortSCpnt;
        MPT_LOCAL_REPLY           localReply;           /* internal cmd reply struct */
-       unsigned long             hard_resets;          /* driver forced bus resets count */
-       unsigned long             soft_resets;          /* fw/external bus resets count */
-       unsigned long             timeouts;             /* cmd timeouts */
        ushort                    sel_timeout[MPT_MAX_FC_DEVICES];
        char                      *info_kbuf;
-       wait_queue_head_t         scandv_waitq;
-       int                       scandv_wait_done;
        long                      last_queue_full;
        u16                       tm_iocstatus;
        u16                       spi_pending;
@@ -870,21 +913,16 @@ struct scsi_cmnd;
  * Generic structure passed to the base mpt_config function.
  */
 typedef struct _x_config_parms {
-       struct list_head         linkage;       /* linked list */
-       struct timer_list        timer;         /* timer function for this request  */
        union {
                ConfigExtendedPageHeader_t      *ehdr;
                ConfigPageHeader_t      *hdr;
        } cfghdr;
        dma_addr_t               physAddr;
-       int                      wait_done;     /* wait for this request */
        u32                      pageAddr;      /* properly formatted */
+       u16                      status;
        u8                       action;
        u8                       dir;
        u8                       timeout;       /* seconds */
-       u8                       pad1;
-       u16                      status;
-       u16                      pad2;
 } CONFIGPARMS;
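
With the private timer and wait_done fields gone, callers of mpt_config() only describe the page; waiting is handled internally through ioc->mptbase_cmds. A minimal sketch of a page-header read with the slimmed-down struct:

	CONFIGPARMS		cfg;
	ConfigPageHeader_t	hdr;

	memset(&cfg, 0, sizeof(cfg));
	memset(&hdr, 0, sizeof(hdr));
	hdr.PageType = MPI_CONFIG_PAGETYPE_IOC;
	hdr.PageNumber = 2;
	cfg.cfghdr.hdr = &hdr;
	cfg.physAddr = -1;
	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
	cfg.timeout = 10;		/* seconds */
	if (mpt_config(ioc, &cfg) != 0) {
		/* header read failed */
	}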
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -909,7 +947,6 @@ extern MPT_FRAME_HDR        *mpt_get_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc);
 extern void     mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
 extern void     mpt_put_msg_frame(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
 extern void     mpt_put_msg_frame_hi_pri(u8 cb_idx, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
-extern void     mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
 
 extern int      mpt_send_handshake_request(u8 cb_idx, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
 extern int      mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
@@ -922,6 +959,12 @@ extern void         mpt_free_fw_memory(MPT_ADAPTER *ioc);
 extern int      mpt_findImVolumes(MPT_ADAPTER *ioc);
 extern int      mptbase_sas_persist_operation(MPT_ADAPTER *ioc, u8 persist_opcode);
 extern int      mpt_raid_phys_disk_pg0(MPT_ADAPTER *ioc, u8 phys_disk_num, pRaidPhysDiskPage0_t phys_disk);
+extern int     mpt_raid_phys_disk_pg1(MPT_ADAPTER *ioc, u8 phys_disk_num,
+               pRaidPhysDiskPage1_t phys_disk);
+extern int     mpt_raid_phys_disk_get_num_paths(MPT_ADAPTER *ioc,
+               u8 phys_disk_num);
+extern int      mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
+extern void     mpt_clear_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc);
 extern void     mpt_halt_firmware(MPT_ADAPTER *ioc);
 
 
@@ -959,7 +1002,6 @@ extern int mpt_fwfault_debug;
 #define MPT_SGE_FLAGS_END_OF_BUFFER            (0x40000000)
 #define MPT_SGE_FLAGS_LOCAL_ADDRESS            (0x08000000)
 #define MPT_SGE_FLAGS_DIRECTION                        (0x04000000)
-#define MPT_SGE_FLAGS_ADDRESSING               (mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
 #define MPT_SGE_FLAGS_END_OF_LIST              (0x01000000)
 
 #define MPT_SGE_FLAGS_TRANSACTION_ELEMENT      (0x00000000)
@@ -972,14 +1014,12 @@ extern int mpt_fwfault_debug;
         MPT_SGE_FLAGS_END_OF_BUFFER |  \
         MPT_SGE_FLAGS_END_OF_LIST |    \
         MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
-        MPT_SGE_FLAGS_ADDRESSING | \
         MPT_TRANSFER_IOC_TO_HOST)
 #define MPT_SGE_FLAGS_SSIMPLE_WRITE \
        (MPT_SGE_FLAGS_LAST_ELEMENT |   \
         MPT_SGE_FLAGS_END_OF_BUFFER |  \
         MPT_SGE_FLAGS_END_OF_LIST |    \
         MPT_SGE_FLAGS_SIMPLE_ELEMENT | \
-        MPT_SGE_FLAGS_ADDRESSING | \
         MPT_TRANSFER_HOST_TO_IOC)
 
 /*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
index c63817117c0a881a3855fad1399790429b2da3cc..9b2e2198aee9dd07c12328c026fd7712bac95c03 100644
@@ -84,6 +84,7 @@ MODULE_VERSION(my_VERSION);
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 
 static u8 mptctl_id = MPT_MAX_PROTOCOL_DRIVERS;
+static u8 mptctl_taskmgmt_id = MPT_MAX_PROTOCOL_DRIVERS;
 
 static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
 
@@ -127,10 +128,7 @@ static MptSge_t *kbuf_alloc_2_sgl(int bytes, u32 dir, int sge_offset, int *frags
                struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
 static void kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma,
                struct buflist *buflist, MPT_ADAPTER *ioc);
-static void mptctl_timeout_expired (MPT_IOCTL *ioctl);
-static int  mptctl_bus_reset(MPT_IOCTL *ioctl);
-static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
-static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
+static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function);
 
 /*
  * Reset Handler cleanup function
@@ -183,10 +181,10 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
        int rc = 0;
 
        if (nonblock) {
-               if (!mutex_trylock(&ioc->ioctl->ioctl_mutex))
+               if (!mutex_trylock(&ioc->ioctl_cmds.mutex))
                        rc = -EAGAIN;
        } else {
-               if (mutex_lock_interruptible(&ioc->ioctl->ioctl_mutex))
+               if (mutex_lock_interruptible(&ioc->ioctl_cmds.mutex))
                        rc = -ERESTARTSYS;
        }
        return rc;
@@ -202,99 +200,78 @@ mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
 static int
 mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
 {
-       char *sense_data;
-       int sz, req_index;
-       u16 iocStatus;
-       u8 cmd;
+       char    *sense_data;
+       int     req_index;
+       int     sz;
 
-       if (req)
-                cmd = req->u.hdr.Function;
-       else
-               return 1;
-       dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tcompleting mpi function (0x%02X), req=%p, "
-           "reply=%p\n", ioc->name,  req->u.hdr.Function, req, reply));
-
-       if (ioc->ioctl) {
-
-               if (reply==NULL) {
-
-                       dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "mptctl_reply() NULL Reply "
-                               "Function=%x!\n", ioc->name, cmd));
+       if (!req)
+               return 0;
 
-                       ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
-                       ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
+       dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "completing mpi function "
+           "(0x%02X), req=%p, reply=%p\n", ioc->name,  req->u.hdr.Function,
+           req, reply));
 
-                       /* We are done, issue wake up
-                       */
-                       ioc->ioctl->wait_done = 1;
-                       wake_up (&mptctl_wait);
-                       return 1;
+       /*
+        * Handle continuation of the same reply: process the first
+        * reply and discard the later replies that follow it.
+        */
+       if (ioc->ioctl_cmds.msg_context != req->u.hdr.MsgContext)
+               goto out_continuation;
 
-               }
+       ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
 
-               /* Copy the reply frame (which must exist
-                * for non-SCSI I/O) to the IOC structure.
-                */
-               memcpy(ioc->ioctl->ReplyFrame, reply,
-                       min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
-               ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
+       if (!reply)
+               goto out;
 
-               /* Set the command status to GOOD if IOC Status is GOOD
-                * OR if SCSI I/O cmd and data underrun or recovered error.
-                */
-               iocStatus = le16_to_cpu(reply->u.reply.IOCStatus) & MPI_IOCSTATUS_MASK;
-               if (iocStatus  == MPI_IOCSTATUS_SUCCESS)
-                       ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
-
-               if (iocStatus || reply->u.reply.IOCLogInfo)
-                       dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "\tiocstatus (0x%04X), "
-                               "loginfo (0x%08X)\n", ioc->name,
-                               iocStatus,
-                               le32_to_cpu(reply->u.reply.IOCLogInfo)));
-
-               if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
-                       (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
-
-                       if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
-                               dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-                                       "\tscsi_status (0x%02x), scsi_state (0x%02x), "
-                                       "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
-                                       reply->u.sreply.SCSIStatus,
-                                       reply->u.sreply.SCSIState,
-                                       le16_to_cpu(reply->u.sreply.TaskTag),
-                                       le32_to_cpu(reply->u.sreply.TransferCount)));
-
-                       ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
-
-                       if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
-                       (iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
-                       ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
-                       }
-               }
+       ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+       sz = min(ioc->reply_sz, 4*reply->u.reply.MsgLength);
+       memcpy(ioc->ioctl_cmds.reply, reply, sz);
 
-               /* Copy the sense data - if present
-                */
-               if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) &&
-                       (reply->u.sreply.SCSIState &
-                        MPI_SCSI_STATE_AUTOSENSE_VALID)){
+       if (reply->u.reply.IOCStatus || reply->u.reply.IOCLogInfo)
+               dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "iocstatus (0x%04X), loginfo (0x%08X)\n", ioc->name,
+                   le16_to_cpu(reply->u.reply.IOCStatus),
+                   le32_to_cpu(reply->u.reply.IOCLogInfo)));
+
+       if ((req->u.hdr.Function == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+               (req->u.hdr.Function ==
+                MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+
+               if (reply->u.sreply.SCSIStatus || reply->u.sreply.SCSIState)
+                       dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                       "scsi_status (0x%02x), scsi_state (0x%02x), "
+                       "tag = (0x%04x), transfer_count (0x%08x)\n", ioc->name,
+                       reply->u.sreply.SCSIStatus,
+                       reply->u.sreply.SCSIState,
+                       le16_to_cpu(reply->u.sreply.TaskTag),
+                       le32_to_cpu(reply->u.sreply.TransferCount)));
+
+               if (reply->u.sreply.SCSIState &
+                       MPI_SCSI_STATE_AUTOSENSE_VALID) {
                        sz = req->u.scsireq.SenseBufferLength;
                        req_index =
                            le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
-                       sense_data =
-                           ((u8 *)ioc->sense_buf_pool +
+                       sense_data = ((u8 *)ioc->sense_buf_pool +
                             (req_index * MPT_SENSE_BUFFER_ALLOC));
-                       memcpy(ioc->ioctl->sense, sense_data, sz);
-                       ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID;
+                       memcpy(ioc->ioctl_cmds.sense, sense_data, sz);
+                       ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_SENSE_VALID;
                }
+       }
 
-               if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT)
-                       mptctl_free_tm_flags(ioc);
-
-               /* We are done, issue wake up
-                */
-               ioc->ioctl->wait_done = 1;
-               wake_up (&mptctl_wait);
+ out:
+       /* We are done, issue wake up
+        */
+       if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
+               if (req->u.hdr.Function == MPI_FUNCTION_SCSI_TASK_MGMT)
+                       mpt_clear_taskmgmt_in_progress_flag(ioc);
+               ioc->ioctl_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+               complete(&ioc->ioctl_cmds.done);
        }
+
+ out_continuation:
+       if (reply && (reply->u.reply.MsgFlags &
+           MPI_MSGFLAGS_CONTINUATION_REPLY))
+               return 0;
        return 1;
 }
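
The return value here is the contract every MPT_CALLBACK shares with mptbase: returning 1 tells the base driver to free the message frame, returning 0 keeps it alive because more (continuation) replies are expected. A hedged sketch of a handler honoring that contract (my_reply and my_id are hypothetical names):

	static int
	my_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
	{
		if (reply && (reply->u.reply.MsgFlags &
		    MPI_MSGFLAGS_CONTINUATION_REPLY))
			return 0;	/* keep the frame; final reply pending */
		return 1;		/* done; mptbase frees the frame */
	}

	static u8 my_id;
	/* registration: my_id = mpt_register(my_reply, MPTCTL_DRIVER); */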
 
@@ -304,30 +281,66 @@ mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
  * Expecting an interrupt, however timed out.
  *
  */
-static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
+static void
+mptctl_timeout_expired(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
 {
-       int rc = 1;
+       unsigned long flags;
 
-       if (ioctl == NULL)
-               return;
-       dctlprintk(ioctl->ioc,
-                  printk(MYIOC_s_DEBUG_FMT ": Timeout Expired! Host %d\n",
-                  ioctl->ioc->name, ioctl->ioc->id));
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": %s\n",
+               ioc->name, __func__));
 
-       ioctl->wait_done = 0;
-       if (ioctl->reset & MPTCTL_RESET_OK)
-               rc = mptctl_bus_reset(ioctl);
+       if (mpt_fwfault_debug)
+               mpt_halt_firmware(ioc);
 
-       if (rc) {
-               /* Issue a reset for this device.
-                * The IOC is not responding.
-                */
-               dctlprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
-                        ioctl->ioc->name));
-               mpt_HardResetHandler(ioctl->ioc, CAN_SLEEP);
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress) {
+               spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+               CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+               mpt_free_msg_frame(ioc, mf);
+               return;
        }
-       return;
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
 
+       if (!mptctl_bus_reset(ioc, mf->u.hdr.Function))
+               return;
+
+       /* Issue a reset for this device.
+        * The IOC is not responding.
+        */
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
+                ioc->name));
+       CLEAR_MGMT_PENDING_STATUS(ioc->ioctl_cmds.status)
+       mpt_HardResetHandler(ioc, CAN_SLEEP);
+       mpt_free_msg_frame(ioc, mf);
+}
+
+static int
+mptctl_taskmgmt_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+       if (!mf)
+               return 0;
+
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+               "TaskMgmt completed (mf=%p, mr=%p)\n",
+               ioc->name, mf, mr));
+
+       ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+
+       if (!mr)
+               goto out;
+
+       ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+       memcpy(ioc->taskmgmt_cmds.reply, mr,
+           min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+ out:
+       if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+               mpt_clear_taskmgmt_in_progress_flag(ioc);
+               ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+               complete(&ioc->taskmgmt_cmds.done);
+               return 1;
+       }
+       return 0;
 }
 
 /* mptctl_bus_reset
@@ -335,133 +348,150 @@ static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
  * Bus reset code.
  *
  */
-static int mptctl_bus_reset(MPT_IOCTL *ioctl)
+static int mptctl_bus_reset(MPT_ADAPTER *ioc, u8 function)
 {
        MPT_FRAME_HDR   *mf;
        SCSITaskMgmt_t  *pScsiTm;
-       MPT_SCSI_HOST   *hd;
+       SCSITaskMgmtReply_t *pScsiTmReply;
        int              ii;
-       int              retval=0;
-
-
-       ioctl->reset &= ~MPTCTL_RESET_OK;
-
-       if (ioctl->ioc->sh == NULL)
+       int              retval;
+       unsigned long    timeout;
+       unsigned long    time_count;
+       u16              iocstatus;
+
+       /* bus reset is only good for SCSI IO, RAID PASSTHRU */
+       if (function != MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH &&
+           function != MPI_FUNCTION_SCSI_IO_REQUEST) {
+               dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
+                       "TaskMgmt, not SCSI_IO!!\n", ioc->name));
                return -EPERM;
+       }
 
-       hd = shost_priv(ioctl->ioc->sh);
-       if (hd == NULL)
+       mutex_lock(&ioc->taskmgmt_cmds.mutex);
+       if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+               mutex_unlock(&ioc->taskmgmt_cmds.mutex);
                return -EPERM;
+       }
 
-       /* Single threading ....
-        */
-       if (mptctl_set_tm_flags(hd) != 0)
-               return -EPERM;
+       retval = 0;
 
        /* Send request
         */
-       if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) {
-               dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt, no msg frames!!\n",
-                               ioctl->ioc->name));
-
-               mptctl_free_tm_flags(ioctl->ioc);
-               return -ENOMEM;
+       mf = mpt_get_msg_frame(mptctl_taskmgmt_id, ioc);
+       if (mf == NULL) {
+               dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
+                       "TaskMgmt, no msg frames!!\n", ioc->name));
+               mpt_clear_taskmgmt_in_progress_flag(ioc);
+               retval = -ENOMEM;
+               goto mptctl_bus_reset_done;
        }
 
-       dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n",
-                       ioctl->ioc->name, mf));
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+               ioc->name, mf));
 
        pScsiTm = (SCSITaskMgmt_t *) mf;
-       pScsiTm->TargetID = ioctl->id;
-       pScsiTm->Bus = hd->port;        /* 0 */
-       pScsiTm->ChainOffset = 0;
+       memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
        pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
-       pScsiTm->Reserved = 0;
        pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
-       pScsiTm->Reserved1 = 0;
        pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
-
+       pScsiTm->TargetID = 0;
+       pScsiTm->Bus = 0;
+       pScsiTm->ChainOffset = 0;
+       pScsiTm->Reserved = 0;
+       pScsiTm->Reserved1 = 0;
+       pScsiTm->TaskMsgContext = 0;
        for (ii= 0; ii < 8; ii++)
                pScsiTm->LUN[ii] = 0;
-
        for (ii=0; ii < 7; ii++)
                pScsiTm->Reserved2[ii] = 0;
 
-       pScsiTm->TaskMsgContext = 0;
-       dtmprintk(ioctl->ioc, printk(MYIOC_s_DEBUG_FMT
-               "mptctl_bus_reset: issued.\n", ioctl->ioc->name));
-
-       DBG_DUMP_TM_REQUEST_FRAME(ioctl->ioc, (u32 *)mf);
+       switch (ioc->bus_type) {
+       case FC:
+               timeout = 40;
+               break;
+       case SAS:
+               timeout = 30;
+               break;
+       case SPI:
+       default:
+               timeout = 2;
+               break;
+       }
 
-       ioctl->wait_done=0;
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+               "TaskMgmt type=%d timeout=%ld\n",
+               ioc->name, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS, timeout));
 
-       if ((ioctl->ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
-           (ioctl->ioc->facts.MsgVersion >= MPI_VERSION_01_05))
-               mpt_put_msg_frame_hi_pri(mptctl_id, ioctl->ioc, mf);
+       INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+       time_count = jiffies;
+       if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
+           (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
+               mpt_put_msg_frame_hi_pri(mptctl_taskmgmt_id, ioc, mf);
        else {
-               retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc,
-                       sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
+               retval = mpt_send_handshake_request(mptctl_taskmgmt_id, ioc,
+                   sizeof(SCSITaskMgmt_t), (u32 *)pScsiTm, CAN_SLEEP);
                if (retval != 0) {
-                       dfailprintk(ioctl->ioc, printk(MYIOC_s_ERR_FMT "_send_handshake FAILED!"
-                               " (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
-                               hd->ioc, mf));
+                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                               "TaskMgmt send_handshake FAILED!"
+                               " (ioc %p, mf %p, rc=%d) \n", ioc->name,
+                               ioc, mf, retval));
+                       mpt_clear_taskmgmt_in_progress_flag(ioc);
                        goto mptctl_bus_reset_done;
                }
        }
 
        /* Now wait for the command to complete */
-       ii = wait_event_timeout(mptctl_wait,
-            ioctl->wait_done == 1,
-            HZ*5 /* 5 second timeout */);
+       ii = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done, timeout*HZ);
+       if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "TaskMgmt failed\n", ioc->name));
+               mpt_free_msg_frame(ioc, mf);
+               mpt_clear_taskmgmt_in_progress_flag(ioc);
+               if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+                       retval = 0;
+               else
+                       retval = -1; /* return failure */
+               goto mptctl_bus_reset_done;
+       }
 
-       if(ii <=0 && (ioctl->wait_done != 1 ))  {
-               mpt_free_msg_frame(hd->ioc, mf);
-               ioctl->wait_done = 0;
+       if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "TaskMgmt failed\n", ioc->name));
+               retval = -1; /* return failure */
+               goto mptctl_bus_reset_done;
+       }
+
+       pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "TaskMgmt fw_channel = %d, fw_id = %d, task_type=0x%02X, "
+           "iocstatus=0x%04X\n\tloginfo=0x%08X, response_code=0x%02X, "
+           "term_cmnds=%d\n", ioc->name, pScsiTmReply->Bus,
+           pScsiTmReply->TargetID, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+           le16_to_cpu(pScsiTmReply->IOCStatus),
+           le32_to_cpu(pScsiTmReply->IOCLogInfo),
+           pScsiTmReply->ResponseCode,
+           le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+       iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+
+       if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+          iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED ||
+          iocstatus == MPI_IOCSTATUS_SUCCESS)
+               retval = 0;
+       else {
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "TaskMgmt failed\n", ioc->name));
                retval = -1; /* return failure */
        }
 
-mptctl_bus_reset_done:
 
-       mptctl_free_tm_flags(ioctl->ioc);
+ mptctl_bus_reset_done:
+       mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+       CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
        return retval;
 }
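
Single-threading now rides on the base driver's taskmgmt_in_progress flag rather than the per-host tmState machine removed below. The helpers are only declared in the mptbase.h hunk above; their bodies live in mptbase.c, roughly along these lines (sketch, not the committed code):

	int
	mpt_set_taskmgmt_in_progress_flag(MPT_ADAPTER *ioc)
	{
		unsigned long flags;
		int retval = 0;

		spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
		if (ioc->ioc_reset_in_progress || ioc->taskmgmt_in_progress)
			retval = -1;	/* reset or another TM owns the IOC */
		else {
			ioc->taskmgmt_in_progress = 1;
			ioc->taskmgmt_quiesce_io = 1;
		}
		spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
		return retval;
	}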
 
-static int
-mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
-       unsigned long flags;
-
-       spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
-
-       if (hd->tmState == TM_STATE_NONE) {
-               hd->tmState = TM_STATE_IN_PROGRESS;
-               hd->tmPending = 1;
-               spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
-       } else {
-               spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
-               return -EBUSY;
-       }
-
-       return 0;
-}
-
-static void
-mptctl_free_tm_flags(MPT_ADAPTER *ioc)
-{
-       MPT_SCSI_HOST * hd;
-       unsigned long flags;
-
-       hd = shost_priv(ioc->sh);
-       if (hd == NULL)
-               return;
-
-       spin_lock_irqsave(&ioc->FreeQlock, flags);
-
-       hd->tmState = TM_STATE_NONE;
-       hd->tmPending = 0;
-       spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-       return;
-}
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /* mptctl_ioc_reset
@@ -473,22 +503,23 @@ mptctl_free_tm_flags(MPT_ADAPTER *ioc)
 static int
 mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 {
-       MPT_IOCTL *ioctl = ioc->ioctl;
-       dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IOC %s_reset routed to IOCTL driver!\n", ioc->name,
-               reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
-               reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
-       if(ioctl == NULL)
-               return 1;
-
        switch(reset_phase) {
        case MPT_IOC_SETUP_RESET:
-               ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET;
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+               break;
+       case MPT_IOC_PRE_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
                break;
        case MPT_IOC_POST_RESET:
-               ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET;
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+               if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->ioctl_cmds.status |= MPT_MGMT_STATUS_DID_IOCRESET;
+                       complete(&ioc->ioctl_cmds.done);
+               }
                break;
-       case MPT_IOC_PRE_RESET:
        default:
                break;
        }
@@ -642,7 +673,7 @@ __mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
        else
                ret = -EINVAL;
 
-       mutex_unlock(&iocp->ioctl->ioctl_mutex);
+       mutex_unlock(&iocp->ioctl_cmds.mutex);
 
        return ret;
 }
@@ -758,6 +789,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
        int                      sge_offset = 0;
        u16                      iocstat;
        pFWDownloadReply_t       ReplyMsg = NULL;
+       unsigned long            timeleft;
 
        if (mpt_verify_adapter(ioc, &iocp) < 0) {
                printk(KERN_DEBUG MYNAM "ioctl_fwdl - ioc%d not found!\n",
@@ -841,8 +873,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
         *      96              8
         *      64              4
         */
-       maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t))
-                       / (sizeof(dma_addr_t) + sizeof(u32));
+       maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) -
+                       sizeof(FWDownloadTCSGE_t))
+                       / iocp->SGE_size;
        if (numfrags > maxfrags) {
                ret = -EMLINK;
                goto fwdl_out;
@@ -870,7 +903,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
                if (nib == 0 || nib == 3) {
                        ;
                } else if (sgIn->Address) {
-                       mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
+                       iocp->add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
                        n++;
                        if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
                                printk(MYIOC_s_ERR_FMT "%s@%d::_ioctl_fwdl - "
@@ -882,7 +915,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
                }
                sgIn++;
                bl++;
-               sgOut += (sizeof(dma_addr_t) + sizeof(u32));
+               sgOut += iocp->SGE_size;
        }
 
        DBG_DUMP_FW_DOWNLOAD(iocp, (u32 *)mf, numfrags);
@@ -891,16 +924,30 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
         * Finally, perform firmware download.
         */
        ReplyMsg = NULL;
+       SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, dlmsg->MsgContext);
+       INITIALIZE_MGMT_STATUS(iocp->ioctl_cmds.status)
        mpt_put_msg_frame(mptctl_id, iocp, mf);
 
        /* Now wait for the command to complete */
-       ret = wait_event_timeout(mptctl_wait,
-            iocp->ioctl->wait_done == 1,
-            HZ*60);
+retry_wait:
+       timeleft = wait_for_completion_timeout(&iocp->ioctl_cmds.done, HZ*60);
+       if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               ret = -ETIME;
+               printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
+               if (iocp->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+                       mpt_free_msg_frame(iocp, mf);
+                       goto fwdl_out;
+               }
+               if (!timeleft)
+                       mptctl_timeout_expired(iocp, mf);
+               else
+                       goto retry_wait;
+               goto fwdl_out;
+       }
 
-       if(ret <=0 && (iocp->ioctl->wait_done != 1 )) {
-       /* Now we need to reset the board */
-               mptctl_timeout_expired(iocp->ioctl);
+       if (!(iocp->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+               printk(MYIOC_s_WARN_FMT "%s: failed\n", iocp->name, __func__);
+               mpt_free_msg_frame(iocp, mf);
                ret = -ENODATA;
                goto fwdl_out;
        }
@@ -908,7 +955,7 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
        if (sgl)
                kfree_sgl(sgl, sgl_dma, buflist, iocp);
 
-       ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame;
+       ReplyMsg = (pFWDownloadReply_t)iocp->ioctl_cmds.reply;
        iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
        if (iocstat == MPI_IOCSTATUS_SUCCESS) {
                printk(MYIOC_s_INFO_FMT "F/W update successful!\n", iocp->name);
@@ -932,6 +979,9 @@ mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
        return 0;
 
 fwdl_out:
+
+       CLEAR_MGMT_STATUS(iocp->ioctl_cmds.status);
+       SET_MGMT_MSG_CONTEXT(iocp->ioctl_cmds.msg_context, 0);
         kfree_sgl(sgl, sgl_dma, buflist, iocp);
        return ret;
 }
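
The retry_wait idiom above reappears in mptctl_do_mpt_command() and mptctl_hp_hostinfo() below. If one wanted to factor it out, a consolidated helper would look roughly like this (hypothetical; not part of the patch):

	static int
	mptctl_wait_mgmt_done(MPT_ADAPTER *ioc, MPT_MGMT *mgmt,
			      MPT_FRAME_HDR *mf, unsigned long timeout)
	{
		unsigned long timeleft;

	retry_wait:
		timeleft = wait_for_completion_timeout(&mgmt->done, HZ * timeout);
		if (!(mgmt->status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
			if (mgmt->status & MPT_MGMT_STATUS_DID_IOCRESET) {
				mpt_free_msg_frame(ioc, mf);
				return -ENODATA;	/* killed by IOC reset */
			}
			if (!timeleft) {
				mptctl_timeout_expired(ioc, mf);
				return -ETIME;		/* genuine timeout */
			}
			goto retry_wait;	/* woken early, status not final */
		}
		return 0;
	}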
@@ -1003,7 +1053,7 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
         *
         */
        sgl = sglbuf;
-       sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1;
+       sg_spill = ((ioc->req_sz - sge_offset)/ioc->SGE_size) - 1;
        while (bytes_allocd < bytes) {
                this_alloc = min(alloc_sz, bytes-bytes_allocd);
                buflist[buflist_ent].len = this_alloc;
@@ -1024,8 +1074,9 @@ kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
                        dma_addr_t dma_addr;
 
                        bytes_allocd += this_alloc;
-                       sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc);
-                       dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir);
+                       sgl->FlagsLength = (0x10000000|sgdir|this_alloc);
+                       dma_addr = pci_map_single(ioc->pcidev,
+                               buflist[buflist_ent].kptr, this_alloc, dir);
                        sgl->Address = dma_addr;
 
                        fragcnt++;
@@ -1771,7 +1822,10 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        int             msgContext;
        u16             req_idx;
        ulong           timeout;
+       unsigned long   timeleft;
        struct scsi_device *sdev;
+       unsigned long    flags;
+       u8               function;
 
        /* bufIn and bufOut are used for user to kernel space transfers
         */
@@ -1784,24 +1838,23 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                                __FILE__, __LINE__, iocnum);
                return -ENODEV;
        }
-       if (!ioc->ioctl) {
-               printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
-                       "No memory available during driver init.\n",
-                               __FILE__, __LINE__);
-               return -ENOMEM;
-       } else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
+
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress) {
+               spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
                printk(KERN_ERR MYNAM "%s@%d::mptctl_do_mpt_command - "
-                       "Busy with IOC Reset \n", __FILE__, __LINE__);
+                       "Busy with diagnostic reset\n", __FILE__, __LINE__);
                return -EBUSY;
        }
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
        /* Verify that the final request frame will not be too large.
         */
        sz = karg.dataSgeOffset * 4;
        if (karg.dataInSize > 0)
-               sz += sizeof(dma_addr_t) + sizeof(u32);
+               sz += ioc->SGE_size;
        if (karg.dataOutSize > 0)
-               sz += sizeof(dma_addr_t) + sizeof(u32);
+               sz += ioc->SGE_size;
 
        if (sz > ioc->req_sz) {
                printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1827,10 +1880,12 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
                        "Unable to read MF from mpt_ioctl_command struct @ %p\n",
                        ioc->name, __FILE__, __LINE__, mfPtr);
+               function = -1;
                rc = -EFAULT;
                goto done_free_mem;
        }
        hdr->MsgContext = cpu_to_le32(msgContext);
+       function = hdr->Function;
 
 
        /* Verify that this request is allowed.
@@ -1838,7 +1893,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sending mpi function (0x%02X), req=%p\n",
            ioc->name, hdr->Function, mf));
 
-       switch (hdr->Function) {
+       switch (function) {
        case MPI_FUNCTION_IOC_FACTS:
        case MPI_FUNCTION_PORT_FACTS:
                karg.dataOutSize  = karg.dataInSize = 0;
@@ -1893,7 +1948,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                        }
 
                        pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
-                       pScsiReq->MsgFlags |= mpt_msg_flags();
+                       pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
 
 
                        /* verify that app has not requested
@@ -1935,8 +1990,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                        pScsiReq->Control = cpu_to_le32(scsidir | qtag);
                        pScsiReq->DataLength = cpu_to_le32(dataSize);
 
-                       ioc->ioctl->reset = MPTCTL_RESET_OK;
-                       ioc->ioctl->id = pScsiReq->TargetID;
 
                } else {
                        printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
@@ -1979,7 +2032,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                        int dataSize;
 
                        pScsiReq->MsgFlags &= ~MPI_SCSIIO_MSGFLGS_SENSE_WIDTH;
-                       pScsiReq->MsgFlags |= mpt_msg_flags();
+                       pScsiReq->MsgFlags |= mpt_msg_flags(ioc);
 
 
                        /* verify that app has not requested
@@ -2014,8 +2067,6 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                        pScsiReq->Control = cpu_to_le32(scsidir | qtag);
                        pScsiReq->DataLength = cpu_to_le32(dataSize);
 
-                       ioc->ioctl->reset = MPTCTL_RESET_OK;
-                       ioc->ioctl->id = pScsiReq->TargetID;
                } else {
                        printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
                                "SCSI driver is not loaded. \n",
@@ -2026,20 +2077,17 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                break;
 
        case MPI_FUNCTION_SCSI_TASK_MGMT:
-               {
-                       MPT_SCSI_HOST *hd = NULL;
-                       if ((ioc->sh == NULL) || ((hd = shost_priv(ioc->sh)) == NULL)) {
-                               printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
-                                       "SCSI driver not loaded or SCSI host not found. \n",
-                                       ioc->name, __FILE__, __LINE__);
-                               rc = -EFAULT;
-                               goto done_free_mem;
-                       } else if (mptctl_set_tm_flags(hd) != 0) {
-                               rc = -EPERM;
-                               goto done_free_mem;
-                       }
-               }
+       {
+               SCSITaskMgmt_t  *pScsiTm;
+               pScsiTm = (SCSITaskMgmt_t *)mf;
+               dctlprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                       "\tTaskType=0x%x MsgFlags=0x%x "
+                       "TaskMsgContext=0x%x id=%d channel=%d\n",
+                       ioc->name, pScsiTm->TaskType, pScsiTm->MsgFlags,
+                       le32_to_cpu(pScsiTm->TaskMsgContext),
+                       pScsiTm->TargetID, pScsiTm->Bus));
                break;
+       }
 
        case MPI_FUNCTION_IOC_INIT:
                {
@@ -2123,8 +2171,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                        if (karg.dataInSize > 0) {
                                flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                                                MPI_SGE_FLAGS_END_OF_BUFFER |
-                                               MPI_SGE_FLAGS_DIRECTION |
-                                               mpt_addr_size() )
+                                               MPI_SGE_FLAGS_DIRECTION)
                                                << MPI_SGE_FLAGS_SHIFT;
                        } else {
                                flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
@@ -2141,8 +2188,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                                /* Set up this SGE.
                                 * Copy to MF and to sglbuf
                                 */
-                               mpt_add_sge(psge, flagsLength, dma_addr_out);
-                               psge += (sizeof(u32) + sizeof(dma_addr_t));
+                               ioc->add_sge(psge, flagsLength, dma_addr_out);
+                               psge += ioc->SGE_size;
 
                                /* Copy user data to kernel space.
                                 */
@@ -2175,18 +2222,25 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                                /* Set up this SGE
                                 * Copy to MF and to sglbuf
                                 */
-                               mpt_add_sge(psge, flagsLength, dma_addr_in);
+                               ioc->add_sge(psge, flagsLength, dma_addr_in);
                        }
                }
        } else  {
                /* Add a NULL SGE
                 */
-               mpt_add_sge(psge, flagsLength, (dma_addr_t) -1);
+               ioc->add_sge(psge, flagsLength, (dma_addr_t) -1);
        }
 
-       ioc->ioctl->wait_done = 0;
+       SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, hdr->MsgContext);
+       INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
        if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
 
+               mutex_lock(&ioc->taskmgmt_cmds.mutex);
+               if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+                       mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+                       rc = -EPERM;
+                       goto done_free_mem;
+               }
+
                DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
 
                if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
@@ -2197,10 +2251,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
                                sizeof(SCSITaskMgmt_t), (u32*)mf, CAN_SLEEP);
                        if (rc != 0) {
                                dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                                   "_send_handshake FAILED! (ioc %p, mf %p)\n",
+                                   "send_handshake FAILED! (ioc %p, mf %p)\n",
                                    ioc->name, ioc, mf));
-                               mptctl_free_tm_flags(ioc);
+                               mpt_clear_taskmgmt_in_progress_flag(ioc);
                                rc = -ENODATA;
+                               mutex_unlock(&ioc->taskmgmt_cmds.mutex);
                                goto done_free_mem;
                        }
                }
@@ -2210,36 +2265,47 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
 
        /* Now wait for the command to complete */
        timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
-       timeout = wait_event_timeout(mptctl_wait,
-            ioc->ioctl->wait_done == 1,
-            HZ*timeout);
-
-       if(timeout <=0 && (ioc->ioctl->wait_done != 1 )) {
-       /* Now we need to reset the board */
-
-               if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT)
-                       mptctl_free_tm_flags(ioc);
-
-               mptctl_timeout_expired(ioc->ioctl);
-               rc = -ENODATA;
+retry_wait:
+       timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
+                               HZ*timeout);
+       if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               rc = -ETIME;
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "%s: TIMED OUT!\n",
+                   ioc->name, __func__));
+               if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+                       if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+                               mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+                       goto done_free_mem;
+               }
+               if (!timeleft) {
+                       if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+                               mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+                       mptctl_timeout_expired(ioc, mf);
+                       mf = NULL;
+               } else
+                       goto retry_wait;
                goto done_free_mem;
        }
 
+       if (function == MPI_FUNCTION_SCSI_TASK_MGMT)
+               mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+
        mf = NULL;
 
        /* If a valid reply frame, copy to the user.
         * Offset 2: reply length in U32's
         */
-       if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) {
+       if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID) {
                if (karg.maxReplyBytes < ioc->reply_sz) {
-                        sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]);
+                       sz = min(karg.maxReplyBytes,
+                               4*ioc->ioctl_cmds.reply[2]);
                } else {
-                        sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]);
+                        sz = min(ioc->reply_sz, 4*ioc->ioctl_cmds.reply[2]);
                }
-
                if (sz > 0) {
                        if (copy_to_user(karg.replyFrameBufPtr,
-                                &ioc->ioctl->ReplyFrame, sz)){
+                                ioc->ioctl_cmds.reply, sz)){
                                 printk(MYIOC_s_ERR_FMT
                                     "%s@%d::mptctl_do_mpt_command - "
                                 "Unable to write out reply frame %p\n",
@@ -2252,10 +2318,11 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
 
        /* If valid sense data, copy to user.
         */
-       if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) {
+       if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_SENSE_VALID) {
                sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
                if (sz > 0) {
-                       if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) {
+                       if (copy_to_user(karg.senseDataPtr,
+                               ioc->ioctl_cmds.sense, sz)) {
                                printk(MYIOC_s_ERR_FMT "%s@%d::mptctl_do_mpt_command - "
                                "Unable to write sense data to user %p\n",
                                ioc->name, __FILE__, __LINE__,
@@ -2269,7 +2336,7 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
        /* If the overall status is _GOOD and data in, copy data
         * to user.
         */
-       if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) &&
+       if ((ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD) &&
                                (karg.dataInSize > 0) && (bufIn.kptr)) {
 
                if (copy_to_user(karg.dataInBufPtr,
@@ -2284,9 +2351,8 @@ mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
 
 done_free_mem:
 
-       ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD |
-               MPT_IOCTL_STATUS_SENSE_VALID |
-               MPT_IOCTL_STATUS_RF_VALID );
+       CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
+       SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
 
        /* Free the allocated memory.
         */
@@ -2336,6 +2402,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
        ToolboxIstwiReadWriteRequest_t  *IstwiRWRequest;
        MPT_FRAME_HDR           *mf = NULL;
        MPIHeader_t             *mpi_hdr;
+       unsigned long           timeleft;
+       int                     retval;
 
        /* Reset long to int. Should affect IA64 and SPARC only
         */
@@ -2466,9 +2534,9 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
                MPT_SCSI_HOST *hd =  shost_priv(ioc->sh);
 
                if (hd && (cim_rev == 1)) {
-                       karg.hard_resets = hd->hard_resets;
-                       karg.soft_resets = hd->soft_resets;
-                       karg.timeouts = hd->timeouts;
+                       karg.hard_resets = ioc->hard_resets;
+                       karg.soft_resets = ioc->soft_resets;
+                       karg.timeouts = ioc->timeouts;
                }
        }
 
@@ -2476,8 +2544,8 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
         * Gather ISTWI(Industry Standard Two Wire Interface) Data
         */
        if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL) {
-               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames!!\n",
-                   ioc->name,__func__));
+               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+                       "%s, no msg frames!!\n", ioc->name, __func__));
                goto out;
        }
 
@@ -2498,22 +2566,29 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
        pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
        if (!pbuf)
                goto out;
-       mpt_add_sge((char *)&IstwiRWRequest->SGL,
+       ioc->add_sge((char *)&IstwiRWRequest->SGL,
            (MPT_SGE_FLAGS_SSIMPLE_READ|4), buf_dma);
 
-       ioc->ioctl->wait_done = 0;
+       retval = 0;
+       SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context,
+                               IstwiRWRequest->MsgContext);
+       INITIALIZE_MGMT_STATUS(ioc->ioctl_cmds.status)
        mpt_put_msg_frame(mptctl_id, ioc, mf);
 
-       rc = wait_event_timeout(mptctl_wait,
-            ioc->ioctl->wait_done == 1,
-            HZ*MPT_IOCTL_DEFAULT_TIMEOUT /* 10 sec */);
-
-       if(rc <=0 && (ioc->ioctl->wait_done != 1 )) {
-               /*
-                * Now we need to reset the board
-                */
-               mpt_free_msg_frame(ioc, mf);
-               mptctl_timeout_expired(ioc->ioctl);
+retry_wait:
+       timeleft = wait_for_completion_timeout(&ioc->ioctl_cmds.done,
+                       HZ*MPT_IOCTL_DEFAULT_TIMEOUT);
+       if (!(ioc->ioctl_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               retval = -ETIME;
+               printk(MYIOC_s_WARN_FMT "%s: failed\n", ioc->name, __func__);
+               if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+                       mpt_free_msg_frame(ioc, mf);
+                       goto out;
+               }
+               if (!timeleft)
+                       mptctl_timeout_expired(ioc, mf);
+               else
+                       goto retry_wait;
                goto out;
        }
 
@@ -2526,10 +2601,13 @@ mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
         *   bays have drives in them
         * pbuf[3] = Checksum (0x100 = (byte0 + byte2 + byte3)
         */
-       if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID)
+       if (ioc->ioctl_cmds.status & MPT_MGMT_STATUS_RF_VALID)
                karg.rsvd = *(u32 *)pbuf;
 
  out:
+       CLEAR_MGMT_STATUS(ioc->ioctl_cmds.status)
+       SET_MGMT_MSG_CONTEXT(ioc->ioctl_cmds.msg_context, 0);
+
        if (pbuf)
                pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
 
@@ -2753,7 +2831,7 @@ compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
 
        ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
 
-       mutex_unlock(&iocp->ioctl->ioctl_mutex);
+       mutex_unlock(&iocp->ioctl_cmds.mutex);
 
        return ret;
 }
@@ -2807,7 +2885,7 @@ compat_mpt_command(struct file *filp, unsigned int cmd,
         */
        ret = mptctl_do_mpt_command (karg, &uarg->MF);
 
-       mutex_unlock(&iocp->ioctl->ioctl_mutex);
+       mutex_unlock(&iocp->ioctl_cmds.mutex);
 
        return ret;
 }
@@ -2859,21 +2937,10 @@ static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long a
 static int
 mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
-       MPT_IOCTL *mem;
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
 
-       /*
-        * Allocate and inite a MPT_IOCTL structure
-       */
-       mem = kzalloc(sizeof(MPT_IOCTL), GFP_KERNEL);
-       if (!mem) {
-               mptctl_remove(pdev);
-               return -ENOMEM;
-       }
-
-       ioc->ioctl = mem;
-       ioc->ioctl->ioc = ioc;
-       mutex_init(&ioc->ioctl->ioctl_mutex);
+       mutex_init(&ioc->ioctl_cmds.mutex);
+       init_completion(&ioc->ioctl_cmds.done);
        return 0;
 }
 
@@ -2887,9 +2954,6 @@ mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 static void
 mptctl_remove(struct pci_dev *pdev)
 {
-       MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
-
-       kfree ( ioc->ioctl );
 }
 
 static struct mpt_pci_driver mptctl_driver = {
@@ -2929,6 +2993,7 @@ static int __init mptctl_init(void)
                goto out_fail;
        }
 
+       mptctl_taskmgmt_id = mpt_register(mptctl_taskmgmt_reply, MPTCTL_DRIVER);
        mpt_reset_register(mptctl_id, mptctl_ioc_reset);
        mpt_event_register(mptctl_id, mptctl_event_process);
 
@@ -2953,6 +3018,7 @@ static void mptctl_exit(void)
 
        /* De-register callback handler from base module */
        mpt_deregister(mptctl_id);
+       mpt_reset_deregister(mptctl_taskmgmt_id);
 
         mpt_device_driver_deregister(MPTCTL_DRIVER);
 
index 510b9f492093432ea63ef5d03dadd39cd80ca048..28e4788792848d4854618ce9d0c8f2146399a998 100644 (file)
@@ -58,6 +58,7 @@
 #define MPT_DEBUG_FC                   0x00080000
 #define MPT_DEBUG_SAS                  0x00100000
 #define MPT_DEBUG_SAS_WIDE             0x00200000
+#define MPT_DEBUG_36GB_MEM              0x00400000
 
 /*
  * CONFIG_FUSION_LOGGING - enabled in Kconfig
 #define dsaswideprintk(IOC, CMD)               \
        MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_SAS_WIDE)
 
+#define d36memprintk(IOC, CMD)         \
+       MPT_CHECK_LOGGING(IOC, CMD, MPT_DEBUG_36GB_MEM)
 
 
 /*
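
The new bit follows the existing convention: each MPT_DEBUG_* flag gates a matching dXXXprintk macro through MPT_CHECK_LOGGING against the adapter's debug level. A hypothetical call site (not in this patch; the message text is made up):

    /* emitted only when MPT_DEBUG_36GB_MEM is set in the debug level */
    d36memprintk(ioc, printk(MYIOC_s_DEBUG_FMT
            "restricting DMA mask for request frame pool\n", ioc->name));
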
index c3c24fdf9fb621c20acb8249496c6440cad8b8b6..e61df133a59e8a934b25e9d93d419af6e6a50899 100644 (file)
@@ -1251,17 +1251,15 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * A slightly different algorithm is required for
         * 64bit SGEs.
         */
-       scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
+       scale = ioc->req_sz/ioc->SGE_size;
+       if (ioc->sg_addr_size == sizeof(u64)) {
                numSGE = (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
-                 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-                 sizeof(u32));
+                 (ioc->req_sz - 60) / ioc->SGE_size;
        } else {
                numSGE = 1 + (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
-                 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-                 sizeof(u32));
+                 (ioc->req_sz - 64) / ioc->SGE_size;
        }
 
        if (numSGE < sh->sg_tablesize) {
@@ -1292,9 +1290,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        /* Clear the TM flags
         */
-       hd->tmPending = 0;
-       hd->tmState = TM_STATE_NONE;
-       hd->resetPending = 0;
        hd->abortSCpnt = NULL;
 
        /* Clear the pointer used to store
@@ -1312,8 +1307,6 @@ mptfc_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        hd->timer.data = (unsigned long) hd;
        hd->timer.function = mptscsih_timer_expired;
 
-       init_waitqueue_head(&hd->scandv_waitq);
-       hd->scandv_wait_done = 0;
        hd->last_queue_full = 0;
 
        sh->transportt = mptfc_transport_template;
index 79f5433359f9b3fc1b346cb78b5d11dcd60d15de..20e0b447e8e83a0ce07455de4d7f56215f43a568 100644 (file)
@@ -93,8 +93,37 @@ static u8    mptsasDoneCtx = MPT_MAX_PROTOCOL_DRIVERS;
 static u8      mptsasTaskCtx = MPT_MAX_PROTOCOL_DRIVERS;
 static u8      mptsasInternalCtx = MPT_MAX_PROTOCOL_DRIVERS; /* Used only for internal commands */
 static u8      mptsasMgmtCtx = MPT_MAX_PROTOCOL_DRIVERS;
-
-static void mptsas_hotplug_work(struct work_struct *work);
+static u8      mptsasDeviceResetCtx = MPT_MAX_PROTOCOL_DRIVERS;
+
+static void mptsas_firmware_event_work(struct work_struct *work);
+static void mptsas_send_sas_event(struct fw_event_work *fw_event);
+static void mptsas_send_raid_event(struct fw_event_work *fw_event);
+static void mptsas_send_ir2_event(struct fw_event_work *fw_event);
+static void mptsas_parse_device_info(struct sas_identify *identify,
+               struct mptsas_devinfo *device_info);
+static inline void mptsas_set_rphy(MPT_ADAPTER *ioc,
+               struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy);
+static struct mptsas_phyinfo   *mptsas_find_phyinfo_by_sas_address
+               (MPT_ADAPTER *ioc, u64 sas_address);
+static int mptsas_sas_device_pg0(MPT_ADAPTER *ioc,
+       struct mptsas_devinfo *device_info, u32 form, u32 form_specific);
+static int mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc,
+       struct mptsas_enclosure *enclosure, u32 form, u32 form_specific);
+static int mptsas_add_end_device(MPT_ADAPTER *ioc,
+       struct mptsas_phyinfo *phy_info);
+static void mptsas_del_end_device(MPT_ADAPTER *ioc,
+       struct mptsas_phyinfo *phy_info);
+static void mptsas_send_link_status_event(struct fw_event_work *fw_event);
+static struct mptsas_portinfo  *mptsas_find_portinfo_by_sas_address
+               (MPT_ADAPTER *ioc, u64 sas_address);
+static void mptsas_expander_delete(MPT_ADAPTER *ioc,
+               struct mptsas_portinfo *port_info, u8 force);
+static void mptsas_send_expander_event(struct fw_event_work *fw_event);
+static void mptsas_not_responding_devices(MPT_ADAPTER *ioc);
+static void mptsas_scan_sas_topology(MPT_ADAPTER *ioc);
+static void mptsas_broadcast_primative_work(struct fw_event_work *fw_event);
+static void mptsas_handle_queue_full_event(struct fw_event_work *fw_event);
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id);
 
 static void mptsas_print_phy_data(MPT_ADAPTER *ioc,
                                        MPI_SAS_IO_UNIT0_PHY_DATA *phy_data)
@@ -218,30 +247,125 @@ static void mptsas_print_expander_pg1(MPT_ADAPTER *ioc, SasExpanderPage1_t *pg1)
            le16_to_cpu(pg1->AttachedDevHandle)));
 }
 
-static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
+/* inhibit sas firmware event handling */
+static void
+mptsas_fw_event_off(MPT_ADAPTER *ioc)
 {
-       struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
-       return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioc->fw_event_lock, flags);
+       ioc->fw_events_off = 1;
+       ioc->sas_discovery_quiesce_io = 0;
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+
 }
 
-static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
+/* enable sas firmware event handling */
+static void
+mptsas_fw_event_on(MPT_ADAPTER *ioc)
 {
-       struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
-       return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioc->fw_event_lock, flags);
+       ioc->fw_events_off = 0;
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
 
-static struct mptsas_portinfo *
-mptsas_get_hba_portinfo(MPT_ADAPTER *ioc)
+/* queue a sas firmware event */
+static void
+mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+    unsigned long delay)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioc->fw_event_lock, flags);
+       list_add_tail(&fw_event->list, &ioc->fw_event_list);
+       INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)\n",
+           ioc->name, __func__, fw_event));
+       queue_delayed_work(ioc->fw_event_q, &fw_event->work,
+           delay);
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* requeue a sas firmware event */
+static void
+mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+    unsigned long delay)
+{
+       unsigned long flags;
+       spin_lock_irqsave(&ioc->fw_event_lock, flags);
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: reschedule task "
+           "(fw_event=0x%p)\n", ioc->name, __func__, fw_event));
+       fw_event->retries++;
+       queue_delayed_work(ioc->fw_event_q, &fw_event->work,
+           msecs_to_jiffies(delay));
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* free memory associated with a sas firmware event */
+static void
+mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
+{
+       unsigned long flags;
+
+       spin_lock_irqsave(&ioc->fw_event_lock, flags);
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
+           ioc->name, __func__, fw_event));
+       list_del(&fw_event->list);
+       kfree(fw_event);
+       spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+}
+
+/* walk the firmware event queue, and either stop or wait for
+ * outstanding events to complete */
+static void
+mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
 {
-       struct list_head        *head = &ioc->sas_topology;
-       struct mptsas_portinfo  *pi = NULL;
+       struct fw_event_work *fw_event, *next;
+       struct mptsas_target_reset_event *target_reset_list, *n;
+       u8      flush_q;
+       MPT_SCSI_HOST   *hd = shost_priv(ioc->sh);
+
+       /* flush the target_reset_list */
+       if (!list_empty(&hd->target_reset_list)) {
+               list_for_each_entry_safe(target_reset_list, n,
+                   &hd->target_reset_list, list) {
+                       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                           "%s: removing target reset for id=%d\n",
+                           ioc->name, __func__,
+                          target_reset_list->sas_event_data.TargetID));
+                       list_del(&target_reset_list->list);
+                       kfree(target_reset_list);
+               }
+       }
+
+       if (list_empty(&ioc->fw_event_list) ||
+            !ioc->fw_event_q || in_interrupt())
+               return;
+
+       flush_q = 0;
+       list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
+               if (cancel_delayed_work(&fw_event->work))
+                       mptsas_free_fw_event(ioc, fw_event);
+               else
+                       flush_q = 1;
+       }
+       if (flush_q)
+               flush_workqueue(ioc->fw_event_q);
+}
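
Taken together these helpers give firmware events a simple producer/consumer lifecycle. A sketch of the producer side, mirroring mptsas_queue_device_delete further down in this diff (event, event_data and event_data_sz are stand-ins):

    struct fw_event_work *fw_event;
    int sz = offsetof(struct fw_event_work, event_data) + event_data_sz;

    fw_event = kzalloc(sz, GFP_ATOMIC);     /* event context: no sleeping */
    if (fw_event) {
            fw_event->event = event;        /* dispatched by the worker */
            fw_event->ioc = ioc;
            memcpy(fw_event->event_data, event_data, event_data_sz);
            mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
    }
    /* consumer: mptsas_firmware_event_work() runs on ioc->fw_event_q and
     * ends in mptsas_free_fw_event() or mptsas_requeue_fw_event() */
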
 
-       /* always the first entry on sas_topology list */
 
-       if (!list_empty(head))
-               pi = list_entry(head->next, struct mptsas_portinfo, list);
+static inline MPT_ADAPTER *phy_to_ioc(struct sas_phy *phy)
+{
+       struct Scsi_Host *shost = dev_to_shost(phy->dev.parent);
+       return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
+}
 
-       return pi;
+static inline MPT_ADAPTER *rphy_to_ioc(struct sas_rphy *rphy)
+{
+       struct Scsi_Host *shost = dev_to_shost(rphy->dev.parent->parent);
+       return ((MPT_SCSI_HOST *)shost->hostdata)->ioc;
 }
 
 /*
@@ -265,6 +389,38 @@ mptsas_find_portinfo_by_handle(MPT_ADAPTER *ioc, u16 handle)
        return rc;
 }
 
+/**
+ *     mptsas_find_portinfo_by_sas_address - find port info by sas address
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @sas_address: sas address of the device or expander to look up
+ *
+ *     Note: this function takes the sas_topology_mutex itself; do not
+ *     call it with that mutex already held
+ *
+ **/
+static struct mptsas_portinfo *
+mptsas_find_portinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
+{
+       struct mptsas_portinfo *port_info, *rc = NULL;
+       int i;
+
+       if (sas_address >= ioc->hba_port_sas_addr &&
+           sas_address < (ioc->hba_port_sas_addr +
+           ioc->hba_port_num_phy))
+               return ioc->hba_port_info;
+
+       mutex_lock(&ioc->sas_topology_mutex);
+       list_for_each_entry(port_info, &ioc->sas_topology, list)
+               for (i = 0; i < port_info->num_phys; i++)
+                       if (port_info->phy_info[i].identify.sas_address ==
+                           sas_address) {
+                               rc = port_info;
+                               goto out;
+                       }
+ out:
+       mutex_unlock(&ioc->sas_topology_mutex);
+       return rc;
+}
+
 /*
  * Returns true if there is a scsi end device
  */
@@ -308,6 +464,7 @@ mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_detai
                if(phy_info->port_details != port_details)
                        continue;
                memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
+               mptsas_set_rphy(ioc, phy_info, NULL);
                phy_info->port_details = NULL;
        }
        kfree(port_details);
@@ -379,6 +536,285 @@ starget)
                phy_info->port_details->starget = starget;
 }
 
+/**
+ *     mptsas_add_device_component - add a device to the device info list
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @channel: fw mapped channel of the device
+ *     @id: fw mapped id of the device
+ *     @sas_address: sas address of the device
+ *     @device_info: bitfield describing the device capabilities
+ *     @slot: enclosure slot of the device
+ *     @enclosure_logical_id: logical id of the enclosure
+ *
+ **/
+static void
+mptsas_add_device_component(MPT_ADAPTER *ioc, u8 channel, u8 id,
+       u64 sas_address, u32 device_info, u16 slot, u64 enclosure_logical_id)
+{
+       struct mptsas_device_info       *sas_info, *next;
+       struct scsi_device      *sdev;
+       struct scsi_target      *starget;
+       struct sas_rphy *rphy;
+
+       /*
+        * Delete all matching devices out of the list
+        */
+       mutex_lock(&ioc->sas_device_info_mutex);
+       list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+           list) {
+               if (!sas_info->is_logical_volume &&
+                   (sas_info->sas_address == sas_address ||
+                   (sas_info->fw.channel == channel &&
+                    sas_info->fw.id == id))) {
+                       list_del(&sas_info->list);
+                       kfree(sas_info);
+               }
+       }
+
+       sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
+       if (!sas_info)
+               goto out;
+
+       /*
+        * Set Firmware mapping
+        */
+       sas_info->fw.id = id;
+       sas_info->fw.channel = channel;
+
+       sas_info->sas_address = sas_address;
+       sas_info->device_info = device_info;
+       sas_info->slot = slot;
+       sas_info->enclosure_logical_id = enclosure_logical_id;
+       INIT_LIST_HEAD(&sas_info->list);
+       list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
+
+       /*
+        * Set OS mapping
+        */
+       shost_for_each_device(sdev, ioc->sh) {
+               starget = scsi_target(sdev);
+               rphy = dev_to_rphy(starget->dev.parent);
+               if (rphy->identify.sas_address == sas_address) {
+                       sas_info->os.id = starget->id;
+                       sas_info->os.channel = starget->channel;
+               }
+       }
+
+ out:
+       mutex_unlock(&ioc->sas_device_info_mutex);
+       return;
+}
+
+/**
+ *     mptsas_add_device_component_by_fw - add a device to the list by fw mapping
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @channel: fw mapped channel of the device
+ *     @id: fw mapped id of the device
+ *
+ **/
+static void
+mptsas_add_device_component_by_fw(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+       struct mptsas_devinfo sas_device;
+       struct mptsas_enclosure enclosure_info;
+       int rc;
+
+       rc = mptsas_sas_device_pg0(ioc, &sas_device,
+           (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+            MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+           (channel << 8) + id);
+       if (rc)
+               return;
+
+       memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+       mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+           (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+            MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
+            sas_device.handle_enclosure);
+
+       mptsas_add_device_component(ioc, sas_device.channel,
+           sas_device.id, sas_device.sas_address, sas_device.device_info,
+           sas_device.slot, enclosure_info.enclosure_logical_id);
+}
+
+/**
+ *     mptsas_add_device_component_starget_ir - Handle Integrated RAID, adding each individual device to list
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @starget: scsi target of the raid volume
+ *
+ **/
+static void
+mptsas_add_device_component_starget_ir(MPT_ADAPTER *ioc,
+               struct scsi_target *starget)
+{
+       CONFIGPARMS                     cfg;
+       ConfigPageHeader_t              hdr;
+       dma_addr_t                      dma_handle;
+       pRaidVolumePage0_t              buffer = NULL;
+       int                             i;
+       RaidPhysDiskPage0_t             phys_disk;
+       struct mptsas_device_info       *sas_info, *next;
+
+       memset(&cfg, 0 , sizeof(CONFIGPARMS));
+       memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
+       hdr.PageType = MPI_CONFIG_PAGETYPE_RAID_VOLUME;
+       /* assume all volumes are on channel 0 */
+       cfg.pageAddr = starget->id;
+       cfg.cfghdr.hdr = &hdr;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+       cfg.timeout = 10;
+
+       if (mpt_config(ioc, &cfg) != 0)
+               goto out;
+
+       if (!hdr.PageLength)
+               goto out;
+
+       buffer = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4,
+           &dma_handle);
+
+       if (!buffer)
+               goto out;
+
+       cfg.physAddr = dma_handle;
+       cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+       if (mpt_config(ioc, &cfg) != 0)
+               goto out;
+
+       if (!buffer->NumPhysDisks)
+               goto out;
+
+       /*
+        * Adding entry for hidden components
+        */
+       for (i = 0; i < buffer->NumPhysDisks; i++) {
+
+               if (mpt_raid_phys_disk_pg0(ioc,
+                   buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
+                       continue;
+
+               mptsas_add_device_component_by_fw(ioc, phys_disk.PhysDiskBus,
+                   phys_disk.PhysDiskID);
+
+               mutex_lock(&ioc->sas_device_info_mutex);
+               list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+                   list) {
+                       if (!sas_info->is_logical_volume &&
+                           (sas_info->fw.channel == phys_disk.PhysDiskBus &&
+                           sas_info->fw.id == phys_disk.PhysDiskID)) {
+                               sas_info->is_hidden_raid_component = 1;
+                               sas_info->volume_id = starget->id;
+                       }
+               }
+               mutex_unlock(&ioc->sas_device_info_mutex);
+
+       }
+
+       /*
+        * Delete all matching devices out of the list
+        */
+       mutex_lock(&ioc->sas_device_info_mutex);
+       list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+           list) {
+               if (sas_info->is_logical_volume && sas_info->fw.id ==
+                   starget->id) {
+                       list_del(&sas_info->list);
+                       kfree(sas_info);
+               }
+       }
+
+       sas_info = kzalloc(sizeof(struct mptsas_device_info), GFP_KERNEL);
+       if (sas_info) {
+               sas_info->fw.id = starget->id;
+               sas_info->os.id = starget->id;
+               sas_info->os.channel = starget->channel;
+               sas_info->is_logical_volume = 1;
+               INIT_LIST_HEAD(&sas_info->list);
+               list_add_tail(&sas_info->list, &ioc->sas_device_info_list);
+       }
+       mutex_unlock(&ioc->sas_device_info_mutex);
+
+ out:
+       if (buffer)
+               pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, buffer,
+                   dma_handle);
+}
+
+/**
+ *     mptsas_add_device_component_starget - add a device to the list by scsi target
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @starget: scsi target of the attached device
+ *
+ **/
+static void
+mptsas_add_device_component_starget(MPT_ADAPTER *ioc,
+       struct scsi_target *starget)
+{
+       VirtTarget      *vtarget;
+       struct sas_rphy *rphy;
+       struct mptsas_phyinfo   *phy_info = NULL;
+       struct mptsas_enclosure enclosure_info;
+
+       rphy = dev_to_rphy(starget->dev.parent);
+       vtarget = starget->hostdata;
+       phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+                       rphy->identify.sas_address);
+       if (!phy_info)
+               return;
+
+       memset(&enclosure_info, 0, sizeof(struct mptsas_enclosure));
+       mptsas_sas_enclosure_pg0(ioc, &enclosure_info,
+               (MPI_SAS_ENCLOS_PGAD_FORM_HANDLE <<
+               MPI_SAS_ENCLOS_PGAD_FORM_SHIFT),
+               phy_info->attached.handle_enclosure);
+
+       mptsas_add_device_component(ioc, phy_info->attached.channel,
+               phy_info->attached.id, phy_info->attached.sas_address,
+               phy_info->attached.device_info,
+               phy_info->attached.slot, enclosure_info.enclosure_logical_id);
+}
+
+/**
+ *     mptsas_del_device_component_by_os - Once a device has been removed, we mark the entry in the list as being cached
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @channel: os mapped channel of the device
+ *     @id: os mapped id of the device
+ *
+ **/
+static void
+mptsas_del_device_component_by_os(MPT_ADAPTER *ioc, u8 channel, u8 id)
+{
+       struct mptsas_device_info       *sas_info, *next;
+
+       /*
+        * Set is_cached flag
+        */
+       list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+               list) {
+               if (sas_info->os.channel == channel && sas_info->os.id == id)
+                       sas_info->is_cached = 1;
+       }
+}
+
+/**
+ *     mptsas_del_device_components - Cleaning the list
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_del_device_components(MPT_ADAPTER *ioc)
+{
+       struct mptsas_device_info       *sas_info, *next;
+
+       mutex_lock(&ioc->sas_device_info_mutex);
+       list_for_each_entry_safe(sas_info, next, &ioc->sas_device_info_list,
+               list) {
+               list_del(&sas_info->list);
+               kfree(sas_info);
+       }
+       mutex_unlock(&ioc->sas_device_info_mutex);
+}
+
 
 /*
  * mptsas_setup_wide_ports
@@ -434,8 +870,8 @@ mptsas_setup_wide_ports(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
                 * Forming a port
                 */
                if (!port_details) {
-                       port_details = kzalloc(sizeof(*port_details),
-                               GFP_KERNEL);
+                       port_details = kzalloc(sizeof(struct
+                               mptsas_portinfo_details), GFP_KERNEL);
                        if (!port_details)
                                goto out;
                        port_details->num_phys = 1;
@@ -523,15 +959,62 @@ mptsas_find_vtarget(MPT_ADAPTER *ioc, u8 channel, u8 id)
        VirtTarget                      *vtarget = NULL;
 
        shost_for_each_device(sdev, ioc->sh) {
-               if ((vdevice = sdev->hostdata) == NULL)
+               vdevice = sdev->hostdata;
+               if ((vdevice == NULL) ||
+                       (vdevice->vtarget == NULL))
+                       continue;
+               if ((vdevice->vtarget->tflags &
+                   MPT_TARGET_FLAGS_RAID_COMPONENT ||
+                   vdevice->vtarget->raidVolume))
                        continue;
                if (vdevice->vtarget->id == id &&
-                   vdevice->vtarget->channel == channel)
+                       vdevice->vtarget->channel == channel)
                        vtarget = vdevice->vtarget;
        }
        return vtarget;
 }
 
+static void
+mptsas_queue_device_delete(MPT_ADAPTER *ioc,
+       MpiEventDataSasDeviceStatusChange_t *sas_event_data)
+{
+       struct fw_event_work *fw_event;
+       int sz;
+
+       sz = offsetof(struct fw_event_work, event_data) +
+           sizeof(MpiEventDataSasDeviceStatusChange_t);
+       fw_event = kzalloc(sz, GFP_ATOMIC);
+       if (!fw_event) {
+               printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
+                   ioc->name, __func__, __LINE__);
+               return;
+       }
+       memcpy(fw_event->event_data, sas_event_data,
+           sizeof(MpiEventDataSasDeviceStatusChange_t));
+       fw_event->event = MPI_EVENT_SAS_DEVICE_STATUS_CHANGE;
+       fw_event->ioc = ioc;
+       mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
+}
+
+static void
+mptsas_queue_rescan(MPT_ADAPTER *ioc)
+{
+       struct fw_event_work *fw_event;
+       int sz;
+
+       sz = offsetof(struct fw_event_work, event_data);
+       fw_event = kzalloc(sz, GFP_ATOMIC);
+       if (!fw_event) {
+               printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n",
+                   ioc->name, __func__, __LINE__);
+               return;
+       }
+       fw_event->event = -1;
+       fw_event->ioc = ioc;
+       mptsas_add_fw_event(ioc, fw_event, msecs_to_jiffies(1));
+}
+
+
 /**
  * mptsas_target_reset
  *
@@ -550,13 +1033,21 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
 {
        MPT_FRAME_HDR   *mf;
        SCSITaskMgmt_t  *pScsiTm;
-
-       if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
-               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, no msg frames @%d!!\n",
-                   ioc->name,__func__, __LINE__));
+       if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0)
                return 0;
+
+
+       mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+       if (mf == NULL) {
+               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+                       "%s, no msg frames @%d!!\n", ioc->name,
+                       __func__, __LINE__));
+               goto out_fail;
        }
 
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
+               ioc->name, mf));
+
        /* Format the Request
         */
        pScsiTm = (SCSITaskMgmt_t *) mf;
@@ -569,9 +1060,18 @@ mptsas_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
 
        DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)mf);
 
-       mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+          "TaskMgmt type=%d (sas device delete) fw_channel = %d fw_id = %d)\n",
+          ioc->name, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET, channel, id));
+
+       mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
 
        return 1;
+
+ out_fail:
+
+       mpt_clear_taskmgmt_in_progress_flag(ioc);
+       return 0;
 }
 
 /**
@@ -602,11 +1102,12 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
 
        vtarget->deleted = 1; /* block IO */
 
-       target_reset_list = kzalloc(sizeof(*target_reset_list),
+       target_reset_list = kzalloc(sizeof(struct mptsas_target_reset_event),
            GFP_ATOMIC);
        if (!target_reset_list) {
-               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
-                   ioc->name,__func__, __LINE__));
+               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT
+                       "%s, failed to allocate mem @%d..!!\n",
+                       ioc->name, __func__, __LINE__));
                return;
        }
 
@@ -614,84 +1115,101 @@ mptsas_target_reset_queue(MPT_ADAPTER *ioc,
                sizeof(*sas_event_data));
        list_add_tail(&target_reset_list->list, &hd->target_reset_list);
 
-       if (hd->resetPending)
-               return;
+       target_reset_list->time_count = jiffies;
 
        if (mptsas_target_reset(ioc, channel, id)) {
                target_reset_list->target_reset_issued = 1;
-               hd->resetPending = 1;
        }
 }
 
 /**
- * mptsas_dev_reset_complete
- *
- * Completion for TARGET_RESET after NOT_RESPONDING_EVENT,
- * enable work queue to finish off removing device from upper layers.
- * then send next TARGET_RESET in the queue.
- *
- * @ioc
+ *     mptsas_taskmgmt_complete - complete SAS task management function
+ *     @ioc: Pointer to MPT_ADAPTER structure
  *
+ *     Completion for TARGET_RESET after NOT_RESPONDING_EVENT, enable work
+ *     queue to finish off removing device from upper layers. then send next
+ *     TARGET_RESET in the queue.
  **/
-static void
-mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
+static int
+mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
 {
        MPT_SCSI_HOST   *hd = shost_priv(ioc->sh);
         struct list_head *head = &hd->target_reset_list;
-       struct mptsas_target_reset_event *target_reset_list;
-       struct mptsas_hotplug_event *ev;
-       EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
        u8              id, channel;
-       __le64          sas_address;
+       struct mptsas_target_reset_event        *target_reset_list;
+       SCSITaskMgmtReply_t *pScsiTmReply;
+
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed: "
+           "(mf = %p, mr = %p)\n", ioc->name, mf, mr));
+
+       pScsiTmReply = (SCSITaskMgmtReply_t *)mr;
+       if (pScsiTmReply) {
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "\tTaskMgmt completed: fw_channel = %d, fw_id = %d,\n"
+                   "\ttask_type = 0x%02X, iocstatus = 0x%04X "
+                   "loginfo = 0x%08X,\n\tresponse_code = 0x%02X, "
+                   "term_cmnds = %d\n", ioc->name,
+                   pScsiTmReply->Bus, pScsiTmReply->TargetID,
+                   pScsiTmReply->TaskType,
+                   le16_to_cpu(pScsiTmReply->IOCStatus),
+                   le32_to_cpu(pScsiTmReply->IOCLogInfo),
+                   pScsiTmReply->ResponseCode,
+                   le32_to_cpu(pScsiTmReply->TerminationCount)));
+
+               if (pScsiTmReply->ResponseCode)
+                       mptscsih_taskmgmt_response_code(ioc,
+                       pScsiTmReply->ResponseCode);
+       }
+
+       if (pScsiTmReply && (pScsiTmReply->TaskType ==
+           MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK || pScsiTmReply->TaskType ==
+            MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET)) {
+               ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+               ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+               memcpy(ioc->taskmgmt_cmds.reply, mr,
+                   min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
+               if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+                       complete(&ioc->taskmgmt_cmds.done);
+                       return 1;
+               }
+               return 0;
+       }
+
+       mpt_clear_taskmgmt_in_progress_flag(ioc);
 
        if (list_empty(head))
-               return;
+               return 1;
 
-       target_reset_list = list_entry(head->next, struct mptsas_target_reset_event, list);
+       target_reset_list = list_entry(head->next,
+           struct mptsas_target_reset_event, list);
 
-       sas_event_data = &target_reset_list->sas_event_data;
-       id = sas_event_data->TargetID;
-       channel = sas_event_data->Bus;
-       hd->resetPending = 0;
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "TaskMgmt: completed (%d seconds)\n",
+           ioc->name, jiffies_to_msecs(jiffies -
+           target_reset_list->time_count)/1000));
+
+       id = pScsiTmReply->TargetID;
+       channel = pScsiTmReply->Bus;
+       target_reset_list->time_count = jiffies;
 
        /*
         * retry target reset
         */
        if (!target_reset_list->target_reset_issued) {
-               if (mptsas_target_reset(ioc, channel, id)) {
+               if (mptsas_target_reset(ioc, channel, id))
                        target_reset_list->target_reset_issued = 1;
-                       hd->resetPending = 1;
-               }
-               return;
+               return 1;
        }
 
        /*
         * enable work queue to remove device from upper layers
         */
        list_del(&target_reset_list->list);
+       if ((mptsas_find_vtarget(ioc, channel, id)) && !ioc->fw_events_off)
+               mptsas_queue_device_delete(ioc,
+                       &target_reset_list->sas_event_data);
 
-       ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-       if (!ev) {
-               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s, failed to allocate mem @%d..!!\n",
-                   ioc->name,__func__, __LINE__));
-               return;
-       }
-
-       INIT_WORK(&ev->work, mptsas_hotplug_work);
-       ev->ioc = ioc;
-       ev->handle = le16_to_cpu(sas_event_data->DevHandle);
-       ev->parent_handle =
-           le16_to_cpu(sas_event_data->ParentDevHandle);
-       ev->channel = channel;
-       ev->id =id;
-       ev->phy_id = sas_event_data->PhyNum;
-       memcpy(&sas_address, &sas_event_data->SASAddress,
-           sizeof(__le64));
-       ev->sas_address = le64_to_cpu(sas_address);
-       ev->device_info = le32_to_cpu(sas_event_data->DeviceInfo);
-       ev->event_type = MPTSAS_DEL_DEVICE;
-       schedule_work(&ev->work);
-       kfree(target_reset_list);
 
        /*
         * issue target reset to next device in the queue
@@ -699,34 +1217,19 @@ mptsas_dev_reset_complete(MPT_ADAPTER *ioc)
 
        head = &hd->target_reset_list;
        if (list_empty(head))
-               return;
+               return 1;
 
        target_reset_list = list_entry(head->next, struct mptsas_target_reset_event,
            list);
 
-       sas_event_data = &target_reset_list->sas_event_data;
-       id = sas_event_data->TargetID;
-       channel = sas_event_data->Bus;
+       id = target_reset_list->sas_event_data.TargetID;
+       channel = target_reset_list->sas_event_data.Bus;
+       target_reset_list->time_count = jiffies;
 
-       if (mptsas_target_reset(ioc, channel, id)) {
+       if (mptsas_target_reset(ioc, channel, id))
                target_reset_list->target_reset_issued = 1;
-               hd->resetPending = 1;
-       }
-}
 
-/**
- * mptsas_taskmgmt_complete
- *
- * @ioc
- * @mf
- * @mr
- *
- **/
-static int
-mptsas_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
-{
-       mptsas_dev_reset_complete(ioc);
-       return mptscsih_taskmgmt_complete(ioc, mf, mr);
+       return 1;
 }
 
 /**
@@ -740,37 +1243,59 @@ static int
 mptsas_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 {
        MPT_SCSI_HOST   *hd;
-       struct mptsas_target_reset_event *target_reset_list, *n;
        int rc;
 
        rc = mptscsih_ioc_reset(ioc, reset_phase);
+       if ((ioc->bus_type != SAS) || (!rc))
+               return rc;
 
-       if (ioc->bus_type != SAS)
-               goto out;
-
-       if (reset_phase != MPT_IOC_POST_RESET)
-               goto out;
-
-       if (!ioc->sh || !ioc->sh->hostdata)
-               goto out;
        hd = shost_priv(ioc->sh);
        if (!hd->ioc)
                goto out;
 
-       if (list_empty(&hd->target_reset_list))
-               goto out;
-
-       /* flush the target_reset_list */
-       list_for_each_entry_safe(target_reset_list, n,
-           &hd->target_reset_list, list) {
-               list_del(&target_reset_list->list);
-               kfree(target_reset_list);
+       switch (reset_phase) {
+       case MPT_IOC_SETUP_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+               mptsas_fw_event_off(ioc);
+               break;
+       case MPT_IOC_PRE_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
+               break;
+       case MPT_IOC_POST_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+               if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->sas_mgmt.status |= MPT_MGMT_STATUS_DID_IOCRESET;
+                       complete(&ioc->sas_mgmt.done);
+               }
+               mptsas_cleanup_fw_event_q(ioc);
+               mptsas_queue_rescan(ioc);
+               mptsas_fw_event_on(ioc);
+               break;
+       default:
+               break;
        }
 
  out:
        return rc;
 }
 
+
+/**
+ * enum device_state - TEST UNIT READY (TUR) probe results
+ * @DEVICE_RETRY: need to retry the TUR
+ * @DEVICE_ERROR: TUR returned an error, don't add the device
+ * @DEVICE_READY: device can be added
+ *
+ */
+enum device_state {
+       DEVICE_RETRY,
+       DEVICE_ERROR,
+       DEVICE_READY,
+};
+
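
The enum's consumer, a TEST UNIT READY polling loop, is added elsewhere in this patch; a hypothetical sketch of the intended use, with issue_tur() standing in for the real probe:

    enum device_state state;
    int tries = 0;

    do {
            state = issue_tur(ioc, channel, id);    /* placeholder probe */
    } while (state == DEVICE_RETRY && ++tries < 5);

    if (state == DEVICE_READY)
            mptsas_add_end_device(ioc, phy_info);   /* declared above */
    /* DEVICE_ERROR: skip the device */
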
 static int
 mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
                u32 form, u32 form_specific)
@@ -836,15 +1361,308 @@ mptsas_sas_enclosure_pg0(MPT_ADAPTER *ioc, struct mptsas_enclosure *enclosure,
        return error;
 }
 
+/**
+ *     mptsas_add_end_device - report a new end device to sas transport layer
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @phy_info: describes the attached device
+ *
+ *     Returns 0 on success, non-zero on failure.
+ *
+ **/
+static int
+mptsas_add_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
+{
+       struct sas_rphy *rphy;
+       struct sas_port *port;
+       struct sas_identify identify;
+       char *ds = NULL;
+       u8 fw_id;
+
+       if (!phy_info) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: exit at line=%d\n", ioc->name,
+                        __func__, __LINE__));
+               return 1;
+       }
+
+       fw_id = phy_info->attached.id;
+
+       if (mptsas_get_rphy(phy_info)) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               return 2;
+       }
+
+       port = mptsas_get_port(phy_info);
+       if (!port) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               return 3;
+       }
+
+       if (phy_info->attached.device_info &
+           MPI_SAS_DEVICE_INFO_SSP_TARGET)
+               ds = "ssp";
+       if (phy_info->attached.device_info &
+           MPI_SAS_DEVICE_INFO_STP_TARGET)
+               ds = "stp";
+       if (phy_info->attached.device_info &
+           MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+               ds = "sata";
+
+       printk(MYIOC_s_INFO_FMT "attaching %s device: fw_channel %d, fw_id %d,"
+           " phy %d, sas_addr 0x%llx\n", ioc->name, ds,
+           phy_info->attached.channel, phy_info->attached.id,
+           phy_info->attached.phy_id, (unsigned long long)
+           phy_info->attached.sas_address);
+
+       mptsas_parse_device_info(&identify, &phy_info->attached);
+       rphy = sas_end_device_alloc(port);
+       if (!rphy) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               return 5; /* non-fatal: an rphy can be added later */
+       }
+
+       rphy->identify = identify;
+       if (sas_rphy_add(rphy)) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               sas_rphy_free(rphy);
+               return 6;
+       }
+       mptsas_set_rphy(ioc, phy_info, rphy);
+       return 0;
+}
+
+/**
+ *     mptsas_del_end_device - report a deleted end device to sas transport layer
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @phy_info: describes the attached device
+ *
+ **/
+static void
+mptsas_del_end_device(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info)
+{
+       struct sas_rphy *rphy;
+       struct sas_port *port;
+       struct mptsas_portinfo *port_info;
+       struct mptsas_phyinfo *phy_info_parent;
+       int i;
+       char *ds = NULL;
+       u8 fw_id;
+       u64 sas_address;
+
+       if (!phy_info)
+               return;
+
+       fw_id = phy_info->attached.id;
+       sas_address = phy_info->attached.sas_address;
+
+       if (!phy_info->port_details) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               return;
+       }
+       rphy = mptsas_get_rphy(phy_info);
+       if (!rphy) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               return;
+       }
+
+       if (phy_info->attached.device_info & MPI_SAS_DEVICE_INFO_SSP_INITIATOR
+               || phy_info->attached.device_info
+                       & MPI_SAS_DEVICE_INFO_SMP_INITIATOR
+               || phy_info->attached.device_info
+                       & MPI_SAS_DEVICE_INFO_STP_INITIATOR)
+               ds = "initiator";
+       if (phy_info->attached.device_info &
+           MPI_SAS_DEVICE_INFO_SSP_TARGET)
+               ds = "ssp";
+       if (phy_info->attached.device_info &
+           MPI_SAS_DEVICE_INFO_STP_TARGET)
+               ds = "stp";
+       if (phy_info->attached.device_info &
+           MPI_SAS_DEVICE_INFO_SATA_DEVICE)
+               ds = "sata";
+
+       dev_printk(KERN_DEBUG, &rphy->dev, MYIOC_s_FMT
+           "removing %s device: fw_channel %d, fw_id %d, phy %d,"
+           "sas_addr 0x%llx\n", ioc->name, ds, phy_info->attached.channel,
+           phy_info->attached.id, phy_info->attached.phy_id,
+           (unsigned long long) sas_address);
+
+       port = mptsas_get_port(phy_info);
+       if (!port) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, fw_id, __LINE__));
+               return;
+       }
+       port_info = phy_info->portinfo;
+       phy_info_parent = port_info->phy_info;
+       for (i = 0; i < port_info->num_phys; i++, phy_info_parent++) {
+               if (!phy_info_parent->phy)
+                       continue;
+               if (phy_info_parent->attached.sas_address !=
+                   sas_address)
+                       continue;
+               dev_printk(KERN_DEBUG, &phy_info_parent->phy->dev,
+                   MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n",
+                   ioc->name, phy_info_parent->phy_id,
+                   phy_info_parent->phy);
+               sas_port_delete_phy(port, phy_info_parent->phy);
+       }
+
+       dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
+           "delete port %d, sas_addr (0x%llx)\n", ioc->name,
+            port->port_identifier, (unsigned long long)sas_address);
+       sas_port_delete(port);
+       mptsas_set_port(ioc, phy_info, NULL);
+       mptsas_port_delete(ioc, phy_info->port_details);
+}
+
+struct mptsas_phyinfo *
+mptsas_refreshing_device_handles(MPT_ADAPTER *ioc,
+       struct mptsas_devinfo *sas_device)
+{
+       struct mptsas_phyinfo *phy_info;
+       struct mptsas_portinfo *port_info;
+       int i;
+
+       phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+           sas_device->sas_address);
+       if (!phy_info)
+               goto out;
+       port_info = phy_info->portinfo;
+       if (!port_info)
+               goto out;
+       mutex_lock(&ioc->sas_topology_mutex);
+       for (i = 0; i < port_info->num_phys; i++) {
+               if (port_info->phy_info[i].attached.sas_address !=
+                       sas_device->sas_address)
+                       continue;
+               port_info->phy_info[i].attached.channel = sas_device->channel;
+               port_info->phy_info[i].attached.id = sas_device->id;
+               port_info->phy_info[i].attached.sas_address =
+                   sas_device->sas_address;
+               port_info->phy_info[i].attached.handle = sas_device->handle;
+               port_info->phy_info[i].attached.handle_parent =
+                   sas_device->handle_parent;
+               port_info->phy_info[i].attached.handle_enclosure =
+                   sas_device->handle_enclosure;
+       }
+       mutex_unlock(&ioc->sas_topology_mutex);
+ out:
+       return phy_info;
+}
+
+/**
+ * mptsas_firmware_event_work - work thread for processing fw events
+ * @work: work queue payload containing info describing the event
+ * Context: user
+ *
+ */
+static void
+mptsas_firmware_event_work(struct work_struct *work)
+{
+       struct fw_event_work *fw_event =
+               container_of(work, struct fw_event_work, work.work);
+       MPT_ADAPTER *ioc = fw_event->ioc;
+
+       /* special rescan topology handling */
+       if (fw_event->event == -1) {
+               if (ioc->in_rescan) {
+                       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                               "%s: rescan ignored as it is in progress\n",
+                               ioc->name, __func__));
+                       return;
+               }
+               devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: rescan after "
+                   "reset\n", ioc->name, __func__));
+               ioc->in_rescan = 1;
+               mptsas_not_responding_devices(ioc);
+               mptsas_scan_sas_topology(ioc);
+               ioc->in_rescan = 0;
+               mptsas_free_fw_event(ioc, fw_event);
+               return;
+       }
+
+       /* events handling turned off during host reset */
+       if (ioc->fw_events_off) {
+               mptsas_free_fw_event(ioc, fw_event);
+               return;
+       }
+
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: fw_event=(0x%p), "
+           "event = (0x%02x)\n", ioc->name, __func__, fw_event,
+           (fw_event->event & 0xFF)));
+
+       switch (fw_event->event) {
+       case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
+               mptsas_send_sas_event(fw_event);
+               break;
+       case MPI_EVENT_INTEGRATED_RAID:
+               mptsas_send_raid_event(fw_event);
+               break;
+       case MPI_EVENT_IR2:
+               mptsas_send_ir2_event(fw_event);
+               break;
+       case MPI_EVENT_PERSISTENT_TABLE_FULL:
+               mptbase_sas_persist_operation(ioc,
+                   MPI_SAS_OP_CLEAR_NOT_PRESENT);
+               mptsas_free_fw_event(ioc, fw_event);
+               break;
+       case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+               mptsas_broadcast_primative_work(fw_event);
+               break;
+       case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+               mptsas_send_expander_event(fw_event);
+               break;
+       case MPI_EVENT_SAS_PHY_LINK_STATUS:
+               mptsas_send_link_status_event(fw_event);
+               break;
+       case MPI_EVENT_QUEUE_FULL:
+               mptsas_handle_queue_full_event(fw_event);
+               break;
+       }
+}
+
+
+
 static int
 mptsas_slave_configure(struct scsi_device *sdev)
 {
+       struct Scsi_Host        *host = sdev->host;
+       MPT_SCSI_HOST   *hd = shost_priv(host);
+       MPT_ADAPTER     *ioc = hd->ioc;
+       VirtDevice      *vdevice = sdev->hostdata;
 
-       if (sdev->channel == MPTSAS_RAID_CHANNEL)
+       if (vdevice->vtarget->deleted) {
+               sdev_printk(KERN_INFO, sdev, "clearing deleted flag\n");
+               vdevice->vtarget->deleted = 0;
+       }
+
+       /*
+        * RAID volumes placed beyond the last expected port.
+        * Ignore sending sas mode pages in that case.
+        */
+       if (sdev->channel == MPTSAS_RAID_CHANNEL) {
+               mptsas_add_device_component_starget_ir(ioc, scsi_target(sdev));
                goto out;
+       }
 
        sas_read_port_mode_page(sdev);
 
+       mptsas_add_device_component_starget(ioc, scsi_target(sdev));
+
  out:
        return mptscsih_slave_configure(sdev);
 }
@@ -875,9 +1693,18 @@ mptsas_target_alloc(struct scsi_target *starget)
         * RAID volumes placed beyond the last expected port.
         */
        if (starget->channel == MPTSAS_RAID_CHANNEL) {
-               for (i=0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
-                       if (id == ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID)
-                               channel = ioc->raid_data.pIocPg2->RaidVolume[i].VolumeBus;
+               if (!ioc->raid_data.pIocPg2) {
+                       kfree(vtarget);
+                       return -ENXIO;
+               }
+               for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+                       if (id == ioc->raid_data.pIocPg2->
+                                       RaidVolume[i].VolumeID) {
+                               channel = ioc->raid_data.pIocPg2->
+                                       RaidVolume[i].VolumeBus;
+                       }
+               }
+               vtarget->raidVolume = 1;
                goto out;
        }
 
@@ -926,11 +1753,18 @@ mptsas_target_destroy(struct scsi_target *starget)
        struct sas_rphy         *rphy;
        struct mptsas_portinfo  *p;
        int                      i;
-       MPT_ADAPTER *ioc = hd->ioc;
+       MPT_ADAPTER     *ioc = hd->ioc;
+       VirtTarget      *vtarget;
 
        if (!starget->hostdata)
                return;
 
+       vtarget = starget->hostdata;
+
+       mptsas_del_device_component_by_os(ioc, starget->channel,
+           starget->id);
+
+
        if (starget->channel == MPTSAS_RAID_CHANNEL)
                goto out;
 
@@ -940,12 +1774,21 @@ mptsas_target_destroy(struct scsi_target *starget)
                        if (p->phy_info[i].attached.sas_address !=
                                        rphy->identify.sas_address)
                                continue;
+
+                       starget_printk(KERN_INFO, starget, MYIOC_s_FMT
+                       "delete device: fw_channel %d, fw_id %d, phy %d, "
+                       "sas_addr 0x%llx\n", ioc->name,
+                       p->phy_info[i].attached.channel,
+                       p->phy_info[i].attached.id,
+                       p->phy_info[i].attached.phy_id, (unsigned long long)
+                       p->phy_info[i].attached.sas_address);
+
                        mptsas_set_starget(&p->phy_info[i], NULL);
-                       goto out;
                }
        }
 
  out:
+       vtarget->starget = NULL;
        kfree(starget->hostdata);
        starget->hostdata = NULL;
 }
@@ -1008,6 +1851,8 @@ mptsas_slave_alloc(struct scsi_device *sdev)
 static int
 mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 {
+       MPT_SCSI_HOST   *hd;
+       MPT_ADAPTER     *ioc;
        VirtDevice      *vdevice = SCpnt->device->hostdata;
 
        if (!vdevice || !vdevice->vtarget || vdevice->vtarget->deleted) {
@@ -1016,6 +1861,12 @@ mptsas_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
                return 0;
        }
 
+       hd = shost_priv(SCpnt->device->host);
+       ioc = hd->ioc;
+
+       if (ioc->sas_discovery_quiesce_io)
+               return SCSI_MLQUEUE_HOST_BUSY;
+
 //     scsi_print_command(SCpnt);
 
        return mptscsih_qcmd(SCpnt,done);
@@ -1114,14 +1965,19 @@ static int mptsas_get_linkerrors(struct sas_phy *phy)
 static int mptsas_mgmt_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
                MPT_FRAME_HDR *reply)
 {
-       ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_COMMAND_GOOD;
+       ioc->sas_mgmt.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
        if (reply != NULL) {
-               ioc->sas_mgmt.status |= MPT_SAS_MGMT_STATUS_RF_VALID;
+               ioc->sas_mgmt.status |= MPT_MGMT_STATUS_RF_VALID;
                memcpy(ioc->sas_mgmt.reply, reply,
                    min(ioc->reply_sz, 4 * reply->u.reply.MsgLength));
        }
-       complete(&ioc->sas_mgmt.done);
-       return 1;
+
+       if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_PENDING) {
+               ioc->sas_mgmt.status &= ~MPT_MGMT_STATUS_PENDING;
+               complete(&ioc->sas_mgmt.done);
+               return 1;
+       }
+       return 0;
 }
 
 static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
@@ -1160,6 +2016,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
                MPI_SAS_OP_PHY_HARD_RESET : MPI_SAS_OP_PHY_LINK_RESET;
        req->PhyNum = phy->identify.phy_identifier;
 
+       INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
        mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
 
        timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done,
@@ -1174,7 +2031,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
 
        /* a reply frame is expected */
        if ((ioc->sas_mgmt.status &
-           MPT_IOCTL_STATUS_RF_VALID) == 0) {
+           MPT_MGMT_STATUS_RF_VALID) == 0) {
                error = -ENXIO;
                goto out_unlock;
        }
@@ -1191,6 +2048,7 @@ static int mptsas_phy_reset(struct sas_phy *phy, int hard_reset)
        error = 0;
 
  out_unlock:
+       CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
        mutex_unlock(&ioc->sas_mgmt.mutex);
  out:
        return error;
@@ -1304,7 +2162,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                struct mptsas_portinfo *port_info;
 
                mutex_lock(&ioc->sas_topology_mutex);
-               port_info = mptsas_get_hba_portinfo(ioc);
+               port_info = ioc->hba_port_info;
                if (port_info && port_info->phy_info)
                        sas_address =
                                port_info->phy_info[0].phy->identify.sas_address;
@@ -1319,26 +2177,32 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        /* request */
        flagsLength = (MPI_SGE_FLAGS_SIMPLE_ELEMENT |
                       MPI_SGE_FLAGS_END_OF_BUFFER |
-                      MPI_SGE_FLAGS_DIRECTION |
-                      mpt_addr_size()) << MPI_SGE_FLAGS_SHIFT;
+                      MPI_SGE_FLAGS_DIRECTION)
+                      << MPI_SGE_FLAGS_SHIFT;
        flagsLength |= (blk_rq_bytes(req) - 4);
 
        dma_addr_out = pci_map_single(ioc->pcidev, bio_data(req->bio),
                                      blk_rq_bytes(req), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_out)
                goto put_mf;
-       mpt_add_sge(psge, flagsLength, dma_addr_out);
-       psge += (sizeof(u32) + sizeof(dma_addr_t));
+       ioc->add_sge(psge, flagsLength, dma_addr_out);
+       psge += ioc->SGE_size;
 
        /* response */
-       flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
+       flagsLength = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+               MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+               MPI_SGE_FLAGS_IOC_TO_HOST |
+               MPI_SGE_FLAGS_END_OF_BUFFER;
+
+       flagsLength = flagsLength << MPI_SGE_FLAGS_SHIFT;
        flagsLength |= blk_rq_bytes(rsp) + 4;
        dma_addr_in =  pci_map_single(ioc->pcidev, bio_data(rsp->bio),
                                      blk_rq_bytes(rsp), PCI_DMA_BIDIRECTIONAL);
        if (!dma_addr_in)
                goto unmap;
-       mpt_add_sge(psge, flagsLength, dma_addr_in);
+       ioc->add_sge(psge, flagsLength, dma_addr_in);
 
+       INITIALIZE_MGMT_STATUS(ioc->sas_mgmt.status)
        mpt_put_msg_frame(mptsasMgmtCtx, ioc, mf);
 
        timeleft = wait_for_completion_timeout(&ioc->sas_mgmt.done, 10 * HZ);
@@ -1351,7 +2215,7 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
        }
        mf = NULL;
 
-       if (ioc->sas_mgmt.status & MPT_IOCTL_STATUS_RF_VALID) {
+       if (ioc->sas_mgmt.status & MPT_MGMT_STATUS_RF_VALID) {
                SmpPassthroughReply_t *smprep;
 
                smprep = (SmpPassthroughReply_t *)ioc->sas_mgmt.reply;
@@ -1360,7 +2224,8 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
                req->resid_len = 0;
                rsp->resid_len -= smprep->ResponseDataLength;
        } else {
-               printk(MYIOC_s_ERR_FMT "%s: smp passthru reply failed to be returned\n",
+               printk(MYIOC_s_ERR_FMT
+                   "%s: smp passthru reply failed to be returned\n",
                    ioc->name, __func__);
                ret = -ENXIO;
        }
@@ -1375,6 +2240,7 @@ put_mf:
        if (mf)
                mpt_free_msg_frame(ioc, mf);
 out_unlock:
+       CLEAR_MGMT_STATUS(ioc->sas_mgmt.status)
        mutex_unlock(&ioc->sas_mgmt.mutex);
 out:
        return ret;
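
This hunk retires the compile-time mpt_add_sge()/mpt_addr_size() pair in favour of the per-adapter ioc->add_sge() hook and ioc->SGE_size, so the same passthrough code can emit either 32-bit or 64-bit SGEs. A rough sketch of the wiring this assumes on the mptbase side (helper and field names are assumptions based on the usage above):

	/* sketch: per-adapter SGE dispatch, chosen once at ioc bring-up */
	if (ioc->sg_addr_size == sizeof(u64))
		ioc->add_sge = mpt_add_sge_64bit;	/* assumed helper */
	else
		ioc->add_sge = mpt_add_sge;
	/* one flags/length word plus the address */
	ioc->SGE_size = sizeof(u32) + ioc->sg_addr_size;

Note also that the response SGE now spells out MPI_SGE_FLAGS_IOC_TO_HOST instead of relying on the MPT_SGE_FLAGS_SSIMPLE_READ shorthand.
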
@@ -1438,7 +2304,7 @@ mptsas_sas_io_unit_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 
        port_info->num_phys = buffer->NumPhys;
        port_info->phy_info = kcalloc(port_info->num_phys,
-               sizeof(*port_info->phy_info),GFP_KERNEL);
+               sizeof(struct mptsas_phyinfo), GFP_KERNEL);
        if (!port_info->phy_info) {
                error = -ENOMEM;
                goto out_free_consistent;
@@ -1600,10 +2466,6 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
        __le64 sas_address;
        int error=0;
 
-       if (ioc->sas_discovery_runtime &&
-               mptsas_is_end_device(device_info))
-                       goto out;
-
        hdr.PageVersion = MPI_SASDEVICE0_PAGEVERSION;
        hdr.ExtPageLength = 0;
        hdr.PageNumber = 0;
@@ -1644,6 +2506,7 @@ mptsas_sas_device_pg0(MPT_ADAPTER *ioc, struct mptsas_devinfo *device_info,
 
        mptsas_print_device_pg0(ioc, buffer);
 
+       memset(device_info, 0, sizeof(struct mptsas_devinfo));
        device_info->handle = le16_to_cpu(buffer->DevHandle);
        device_info->handle_parent = le16_to_cpu(buffer->ParentDevHandle);
        device_info->handle_enclosure =
@@ -1675,7 +2538,9 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
        SasExpanderPage0_t *buffer;
        dma_addr_t dma_handle;
        int i, error;
+       __le64 sas_address;
 
+       memset(port_info, 0, sizeof(struct mptsas_portinfo));
        hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
        hdr.ExtPageLength = 0;
        hdr.PageNumber = 0;
@@ -1721,18 +2586,23 @@ mptsas_sas_expander_pg0(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info,
        }
 
        /* save config data */
-       port_info->num_phys = buffer->NumPhys;
+       port_info->num_phys = (buffer->NumPhys) ? buffer->NumPhys : 1;
        port_info->phy_info = kcalloc(port_info->num_phys,
-               sizeof(*port_info->phy_info),GFP_KERNEL);
+               sizeof(struct mptsas_phyinfo), GFP_KERNEL);
        if (!port_info->phy_info) {
                error = -ENOMEM;
                goto out_free_consistent;
        }
 
+       memcpy(&sas_address, &buffer->SASAddress, sizeof(__le64));
        for (i = 0; i < port_info->num_phys; i++) {
                port_info->phy_info[i].portinfo = port_info;
                port_info->phy_info[i].handle =
                    le16_to_cpu(buffer->DevHandle);
+               port_info->phy_info[i].identify.sas_address =
+                   le64_to_cpu(sas_address);
+               port_info->phy_info[i].identify.handle_parent =
+                   le16_to_cpu(buffer->ParentDevHandle);
        }
 
  out_free_consistent:
@@ -1752,11 +2622,7 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
        dma_addr_t dma_handle;
        int error=0;
 
-       if (ioc->sas_discovery_runtime &&
-               mptsas_is_end_device(&phy_info->attached))
-                       goto out;
-
-       hdr.PageVersion = MPI_SASEXPANDER0_PAGEVERSION;
+       hdr.PageVersion = MPI_SASEXPANDER1_PAGEVERSION;
        hdr.ExtPageLength = 0;
        hdr.PageNumber = 1;
        hdr.Reserved1 = 0;
@@ -1791,6 +2657,12 @@ mptsas_sas_expander_pg1(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info,
        cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
 
        error = mpt_config(ioc, &cfg);
+
+       if (error == MPI_IOCSTATUS_CONFIG_INVALID_PAGE) {
+               error = -ENODEV;
+               goto out;
+       }
+
        if (error)
                goto out_free_consistent;
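
Mapping MPI_IOCSTATUS_CONFIG_INVALID_PAGE to -ENODEV lets callers distinguish "this expander page no longer exists" from transient config-page failures. The not-responding scan later in this patch keys off exactly that distinction:

	/* caller-side pattern (see mptsas_not_responding_devices below) */
	if (retval && (retval != -ENODEV)) {
		if (retry_count < 10) {
			retry_count++;
			goto retry_page;	/* transient error: retry */
		}
	}
	/* -ENODEV is not retried: the device is really gone */
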
 
@@ -2010,16 +2882,21 @@ static int mptsas_probe_one_phy(struct device *dev,
                                goto out;
                        }
                        mptsas_set_port(ioc, phy_info, port);
-                       dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-                           "sas_port_alloc: port=%p dev=%p port_id=%d\n",
-                           ioc->name, port, dev, port->port_identifier));
+                       devtprintk(ioc, dev_printk(KERN_DEBUG, &port->dev,
+                           MYIOC_s_FMT "add port %d, sas_addr (0x%llx)\n",
+                           ioc->name, port->port_identifier,
+                           (unsigned long long)phy_info->
+                           attached.sas_address));
                }
-               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_port_add_phy: phy_id=%d\n",
-                   ioc->name, phy_info->phy_id));
+               dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                       "sas_port_add_phy: phy_id=%d\n",
+                       ioc->name, phy_info->phy_id));
                sas_port_add_phy(port, phy_info->phy);
                phy_info->sas_port_add_phy = 0;
+               devtprintk(ioc, dev_printk(KERN_DEBUG, &phy_info->phy->dev,
+                   MYIOC_s_FMT "add phy %d, phy-obj (0x%p)\n", ioc->name,
+                    phy_info->phy_id, phy_info->phy));
        }
-
        if (!mptsas_get_rphy(phy_info) && port && !port->rphy) {
 
                struct sas_rphy *rphy;
@@ -2032,18 +2909,17 @@ static int mptsas_probe_one_phy(struct device *dev,
                 * the adding/removing of devices that occur
                 * after start of day.
                 */
-               if (ioc->sas_discovery_runtime &&
-                       mptsas_is_end_device(&phy_info->attached))
-                               goto out;
+               if (mptsas_is_end_device(&phy_info->attached) &&
+                   phy_info->attached.handle_parent) {
+                       goto out;
+               }
 
                mptsas_parse_device_info(&identify, &phy_info->attached);
                if (scsi_is_host_device(parent)) {
                        struct mptsas_portinfo *port_info;
                        int i;
 
-                       mutex_lock(&ioc->sas_topology_mutex);
-                       port_info = mptsas_get_hba_portinfo(ioc);
-                       mutex_unlock(&ioc->sas_topology_mutex);
+                       port_info = ioc->hba_port_info;
 
                        for (i = 0; i < port_info->num_phys; i++)
                                if (port_info->phy_info[i].identify.sas_address ==
@@ -2102,7 +2978,7 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
        struct mptsas_portinfo *port_info, *hba;
        int error = -ENOMEM, i;
 
-       hba = kzalloc(sizeof(*port_info), GFP_KERNEL);
+       hba = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
        if (! hba)
                goto out;
 
@@ -2112,9 +2988,10 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
 
        mptsas_sas_io_unit_pg1(ioc);
        mutex_lock(&ioc->sas_topology_mutex);
-       port_info = mptsas_get_hba_portinfo(ioc);
+       port_info = ioc->hba_port_info;
        if (!port_info) {
-               port_info = hba;
+               ioc->hba_port_info = port_info = hba;
+               ioc->hba_port_num_phy = port_info->num_phys;
                list_add_tail(&port_info->list, &ioc->sas_topology);
        } else {
                for (i = 0; i < hba->num_phys; i++) {
@@ -2130,15 +3007,22 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
                hba = NULL;
        }
        mutex_unlock(&ioc->sas_topology_mutex);
+#if defined(CPQ_CIM)
+       ioc->num_ports = port_info->num_phys;
+#endif
        for (i = 0; i < port_info->num_phys; i++) {
                mptsas_sas_phy_pg0(ioc, &port_info->phy_info[i],
                        (MPI_SAS_PHY_PGAD_FORM_PHY_NUMBER <<
                         MPI_SAS_PHY_PGAD_FORM_SHIFT), i);
-
+               port_info->phy_info[i].identify.handle =
+                   port_info->phy_info[i].handle;
                mptsas_sas_device_pg0(ioc, &port_info->phy_info[i].identify,
                        (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
                         MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-                        port_info->phy_info[i].handle);
+                        port_info->phy_info[i].identify.handle);
+               if (!ioc->hba_port_sas_addr)
+                       ioc->hba_port_sas_addr =
+                           port_info->phy_info[i].identify.sas_address;
                port_info->phy_info[i].identify.phy_id =
                    port_info->phy_info[i].phy_id = i;
                if (port_info->phy_info[i].attached.handle)
@@ -2163,248 +3047,721 @@ mptsas_probe_hba_phys(MPT_ADAPTER *ioc)
        return error;
 }
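
Several hunks in this patch replace the mptsas_get_hba_portinfo() lookup with the cached ioc->hba_port_info pointer that mptsas_probe_hba_phys() fills in on first discovery and never tears down; that lifetime guarantee is what lets the mptsas_probe_one_phy() hunk above drop the sas_topology_mutex around the read. For contrast, a reconstructed sketch of the retired helper (not part of this diff):

	static struct mptsas_portinfo *
	mptsas_get_hba_portinfo(MPT_ADAPTER *ioc)
	{
		/* the HBA port_info is the first entry on sas_topology */
		return list_empty(&ioc->sas_topology) ? NULL :
			list_first_entry(&ioc->sas_topology,
					 struct mptsas_portinfo, list);
	}
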
 
-static int
-mptsas_probe_expander_phys(MPT_ADAPTER *ioc, u32 *handle)
+static void
+mptsas_expander_refresh(MPT_ADAPTER *ioc, struct mptsas_portinfo *port_info)
 {
-       struct mptsas_portinfo *port_info, *p, *ex;
-       struct device *parent;
-       struct sas_rphy *rphy;
-       int error = -ENOMEM, i, j;
-
-       ex = kzalloc(sizeof(*port_info), GFP_KERNEL);
-       if (!ex)
-               goto out;
-
-       error = mptsas_sas_expander_pg0(ioc, ex,
-           (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
-            MPI_SAS_EXPAND_PGAD_FORM_SHIFT), *handle);
-       if (error)
-               goto out_free_port_info;
-
-       *handle = ex->phy_info[0].handle;
-
-       mutex_lock(&ioc->sas_topology_mutex);
-       port_info = mptsas_find_portinfo_by_handle(ioc, *handle);
-       if (!port_info) {
-               port_info = ex;
-               list_add_tail(&port_info->list, &ioc->sas_topology);
-       } else {
-               for (i = 0; i < ex->num_phys; i++) {
-                       port_info->phy_info[i].handle =
-                               ex->phy_info[i].handle;
-                       port_info->phy_info[i].port_id =
-                               ex->phy_info[i].port_id;
-               }
-               kfree(ex->phy_info);
-               kfree(ex);
-               ex = NULL;
-       }
-       mutex_unlock(&ioc->sas_topology_mutex);
-
+       struct mptsas_portinfo *parent;
+       struct device *parent_dev;
+       struct sas_rphy *rphy;
+       int             i;
+       u64             sas_address; /* expander sas address */
+       u32             handle;
+
+       handle = port_info->phy_info[0].handle;
+       sas_address = port_info->phy_info[0].identify.sas_address;
        for (i = 0; i < port_info->num_phys; i++) {
                mptsas_sas_expander_pg1(ioc, &port_info->phy_info[i],
-                       (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
-                        MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + *handle);
-
-               if (port_info->phy_info[i].identify.handle) {
-                       mptsas_sas_device_pg0(ioc,
-                               &port_info->phy_info[i].identify,
-                               (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
-                                MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-                               port_info->phy_info[i].identify.handle);
-                       port_info->phy_info[i].identify.phy_id =
-                           port_info->phy_info[i].phy_id;
-               }
+                   (MPI_SAS_EXPAND_PGAD_FORM_HANDLE_PHY_NUM <<
+                   MPI_SAS_EXPAND_PGAD_FORM_SHIFT), (i << 16) + handle);
+
+               mptsas_sas_device_pg0(ioc,
+                   &port_info->phy_info[i].identify,
+                   (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+                   MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+                   port_info->phy_info[i].identify.handle);
+               port_info->phy_info[i].identify.phy_id =
+                   port_info->phy_info[i].phy_id;
 
                if (port_info->phy_info[i].attached.handle) {
                        mptsas_sas_device_pg0(ioc,
-                               &port_info->phy_info[i].attached,
-                               (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
-                                MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-                               port_info->phy_info[i].attached.handle);
+                           &port_info->phy_info[i].attached,
+                           (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+                            MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+                           port_info->phy_info[i].attached.handle);
                        port_info->phy_info[i].attached.phy_id =
                            port_info->phy_info[i].phy_id;
                }
        }
 
-       parent = &ioc->sh->shost_gendev;
-       for (i = 0; i < port_info->num_phys; i++) {
-               mutex_lock(&ioc->sas_topology_mutex);
-               list_for_each_entry(p, &ioc->sas_topology, list) {
-                       for (j = 0; j < p->num_phys; j++) {
-                               if (port_info->phy_info[i].identify.handle !=
-                                               p->phy_info[j].attached.handle)
-                                       continue;
-                               rphy = mptsas_get_rphy(&p->phy_info[j]);
-                               parent = &rphy->dev;
-                       }
-               }
+       mutex_lock(&ioc->sas_topology_mutex);
+       parent = mptsas_find_portinfo_by_handle(ioc,
+           port_info->phy_info[0].identify.handle_parent);
+       if (!parent) {
                mutex_unlock(&ioc->sas_topology_mutex);
+               return;
+       }
+       for (i = 0, parent_dev = NULL; i < parent->num_phys && !parent_dev;
+           i++) {
+               if (parent->phy_info[i].attached.sas_address == sas_address) {
+                       rphy = mptsas_get_rphy(&parent->phy_info[i]);
+                       parent_dev = &rphy->dev;
+               }
        }
+       mutex_unlock(&ioc->sas_topology_mutex);
 
        mptsas_setup_wide_ports(ioc, port_info);
-
        for (i = 0; i < port_info->num_phys; i++, ioc->sas_index++)
-               mptsas_probe_one_phy(parent, &port_info->phy_info[i],
+               mptsas_probe_one_phy(parent_dev, &port_info->phy_info[i],
                    ioc->sas_index, 0);
-
-       return 0;
-
- out_free_port_info:
-       if (ex) {
-               kfree(ex->phy_info);
-               kfree(ex);
-       }
- out:
-       return error;
 }
 
-/*
- * mptsas_delete_expander_phys
- *
- *
- * This will traverse topology, and remove expanders
- * that are no longer present
- */
 static void
-mptsas_delete_expander_phys(MPT_ADAPTER *ioc)
+mptsas_expander_event_add(MPT_ADAPTER *ioc,
+    MpiEventDataSasExpanderStatusChange_t *expander_data)
 {
-       struct mptsas_portinfo buffer;
-       struct mptsas_portinfo *port_info, *n, *parent;
-       struct mptsas_phyinfo *phy_info;
-       struct sas_port * port;
+       struct mptsas_portinfo *port_info;
        int i;
-       u64     expander_sas_address;
+       __le64 sas_address;
+
+       port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+       if (!port_info)
+               BUG();
+       port_info->num_phys = (expander_data->NumPhys) ?
+           expander_data->NumPhys : 1;
+       port_info->phy_info = kcalloc(port_info->num_phys,
+           sizeof(struct mptsas_phyinfo), GFP_KERNEL);
+       if (!port_info->phy_info)
+               BUG();
+       memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+       for (i = 0; i < port_info->num_phys; i++) {
+               port_info->phy_info[i].portinfo = port_info;
+               port_info->phy_info[i].handle =
+                   le16_to_cpu(expander_data->DevHandle);
+               port_info->phy_info[i].identify.sas_address =
+                   le64_to_cpu(sas_address);
+               port_info->phy_info[i].identify.handle_parent =
+                   le16_to_cpu(expander_data->ParentDevHandle);
+       }
 
        mutex_lock(&ioc->sas_topology_mutex);
-       list_for_each_entry_safe(port_info, n, &ioc->sas_topology, list) {
+       list_add_tail(&port_info->list, &ioc->sas_topology);
+       mutex_unlock(&ioc->sas_topology_mutex);
 
-               if (!(port_info->phy_info[0].identify.device_info &
-                   MPI_SAS_DEVICE_INFO_SMP_TARGET))
-                       continue;
+       printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+           "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+           (unsigned long long)sas_address);
 
-               if (mptsas_sas_expander_pg0(ioc, &buffer,
-                    (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
-                    MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
-                    port_info->phy_info[0].handle)) {
+       mptsas_expander_refresh(ioc, port_info);
+}
 
-                       /*
-                        * Obtain the port_info instance to the parent port
-                        */
-                       parent = mptsas_find_portinfo_by_handle(ioc,
-                           port_info->phy_info[0].identify.handle_parent);
-
-                       if (!parent)
-                               goto next_port;
+/**
+ * mptsas_delete_expander_siblings - remove siblings attached to expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @parent: the parent port_info object
+ * @expander: the expander port_info object
+ **/
+static void
+mptsas_delete_expander_siblings(MPT_ADAPTER *ioc, struct mptsas_portinfo
+    *parent, struct mptsas_portinfo *expander)
+{
+       struct mptsas_phyinfo *phy_info;
+       struct mptsas_portinfo *port_info;
+       struct sas_rphy *rphy;
+       int i;
 
-                       expander_sas_address =
-                               port_info->phy_info[0].identify.sas_address;
+       phy_info = expander->phy_info;
+       for (i = 0; i < expander->num_phys; i++, phy_info++) {
+               rphy = mptsas_get_rphy(phy_info);
+               if (!rphy)
+                       continue;
+               if (rphy->identify.device_type == SAS_END_DEVICE)
+                       mptsas_del_end_device(ioc, phy_info);
+       }
 
+       phy_info = expander->phy_info;
+       for (i = 0; i < expander->num_phys; i++, phy_info++) {
+               rphy = mptsas_get_rphy(phy_info);
+               if (!rphy)
+                       continue;
+               if (rphy->identify.device_type ==
+                   MPI_SAS_DEVICE_INFO_EDGE_EXPANDER ||
+                   rphy->identify.device_type ==
+                   MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER) {
+                       port_info = mptsas_find_portinfo_by_sas_address(ioc,
+                           rphy->identify.sas_address);
+                       if (!port_info)
+                               continue;
+                       if (port_info == parent) /* backlink rphy */
+                               continue;
                        /*
-                        * Delete rphys in the parent that point
-                        * to this expander.  The transport layer will
-                        * cleanup all the children.
-                        */
-                       phy_info = parent->phy_info;
-                       for (i = 0; i < parent->num_phys; i++, phy_info++) {
-                               port = mptsas_get_port(phy_info);
-                               if (!port)
-                                       continue;
-                               if (phy_info->attached.sas_address !=
-                                       expander_sas_address)
-                                       continue;
-                               dsaswideprintk(ioc,
-                                   dev_printk(KERN_DEBUG, &port->dev,
-                                   MYIOC_s_FMT "delete port (%d)\n", ioc->name,
-                                   port->port_identifier));
-                               sas_port_delete(port);
-                               mptsas_port_delete(ioc, phy_info->port_details);
-                       }
- next_port:
+                        * Delete this expander even if its expander page 0
+                        * still exists, because the parent expander has
+                        * already been deleted.
+                        */
+                       mptsas_expander_delete(ioc, port_info, 1);
+               }
+       }
+}
 
-                       phy_info = port_info->phy_info;
-                       for (i = 0; i < port_info->num_phys; i++, phy_info++)
-                               mptsas_port_delete(ioc, phy_info->port_details);
 
-                       list_del(&port_info->list);
-                       kfree(port_info->phy_info);
-                       kfree(port_info);
-               }
-               /*
-               * Free this memory allocated from inside
-               * mptsas_sas_expander_pg0
-               */
+/**
+ *     mptsas_expander_delete - remove this expander
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @port_info: expander port_info struct
+ *     @force: Flag to forcefully delete the expander
+ *
+ **/
+
+static void mptsas_expander_delete(MPT_ADAPTER *ioc,
+               struct mptsas_portinfo *port_info, u8 force)
+{
+
+       struct mptsas_portinfo *parent;
+       int             i;
+       u64             expander_sas_address;
+       struct mptsas_phyinfo *phy_info;
+       struct mptsas_portinfo buffer;
+       struct mptsas_portinfo_details *port_details;
+       struct sas_port *port;
+
+       if (!port_info)
+               return;
+
+       /* see if expander is still there before deleting */
+       mptsas_sas_expander_pg0(ioc, &buffer,
+           (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+           MPI_SAS_EXPAND_PGAD_FORM_SHIFT),
+           port_info->phy_info[0].identify.handle);
+
+       if (buffer.num_phys) {
                kfree(buffer.phy_info);
+               if (!force)
+                       return;
        }
-       mutex_unlock(&ioc->sas_topology_mutex);
+
+
+       /*
+        * Obtain the port_info instance to the parent port
+        */
+       port_details = NULL;
+       expander_sas_address =
+           port_info->phy_info[0].identify.sas_address;
+       parent = mptsas_find_portinfo_by_handle(ioc,
+           port_info->phy_info[0].identify.handle_parent);
+       mptsas_delete_expander_siblings(ioc, parent, port_info);
+       if (!parent)
+               goto out;
+
+       /*
+        * Delete rphys in the parent that point
+        * to this expander.
+        */
+       phy_info = parent->phy_info;
+       port = NULL;
+       for (i = 0; i < parent->num_phys; i++, phy_info++) {
+               if (!phy_info->phy)
+                       continue;
+               if (phy_info->attached.sas_address !=
+                   expander_sas_address)
+                       continue;
+               if (!port) {
+                       port = mptsas_get_port(phy_info);
+                       port_details = phy_info->port_details;
+               }
+               dev_printk(KERN_DEBUG, &phy_info->phy->dev,
+                   MYIOC_s_FMT "delete phy %d, phy-obj (0x%p)\n", ioc->name,
+                   phy_info->phy_id, phy_info->phy);
+               sas_port_delete_phy(port, phy_info->phy);
+       }
+       if (port) {
+               dev_printk(KERN_DEBUG, &port->dev,
+                   MYIOC_s_FMT "delete port %d, sas_addr (0x%llx)\n",
+                   ioc->name, port->port_identifier,
+                   (unsigned long long)expander_sas_address);
+               sas_port_delete(port);
+               mptsas_port_delete(ioc, port_details);
+       }
+ out:
+
+       printk(MYIOC_s_INFO_FMT "delete expander: num_phys %d, "
+           "sas_addr (0x%llx)\n",  ioc->name, port_info->num_phys,
+           (unsigned long long)expander_sas_address);
+
+       /*
+        * free link
+        */
+       list_del(&port_info->list);
+       kfree(port_info->phy_info);
+       kfree(port_info);
 }
 
-/*
- * Start of day discovery
+
+/**
+ * mptsas_send_expander_event - handle expander status change events
+ * @fw_event: firmware event work struct carrying the expander event data
+ *
+ * This function handles adding, removing, and refreshing
+ * device handles within the expander objects.
  */
 static void
+mptsas_send_expander_event(struct fw_event_work *fw_event)
+{
+       MPT_ADAPTER *ioc;
+       MpiEventDataSasExpanderStatusChange_t *expander_data;
+       struct mptsas_portinfo *port_info;
+       __le64 sas_address;
+       int i;
+
+       ioc = fw_event->ioc;
+       expander_data = (MpiEventDataSasExpanderStatusChange_t *)
+           fw_event->event_data;
+       memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+       port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
+
+       if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
+               if (port_info) {
+                       for (i = 0; i < port_info->num_phys; i++) {
+                               port_info->phy_info[i].portinfo = port_info;
+                               port_info->phy_info[i].handle =
+                                   le16_to_cpu(expander_data->DevHandle);
+                               port_info->phy_info[i].identify.sas_address =
+                                   le64_to_cpu(sas_address);
+                               port_info->phy_info[i].identify.handle_parent =
+                                   le16_to_cpu(expander_data->ParentDevHandle);
+                       }
+                       mptsas_expander_refresh(ioc, port_info);
+               } else if (!port_info && expander_data->NumPhys)
+                       mptsas_expander_event_add(ioc, expander_data);
+       } else if (expander_data->ReasonCode ==
+           MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING)
+               mptsas_expander_delete(ioc, port_info, 0);
+
+       mptsas_free_fw_event(ioc, fw_event);
+}
+
+
+/**
+ * mptsas_expander_add - add a newly discovered expander
+ * @ioc: Pointer to MPT_ADAPTER structure
+ * @handle: firmware device handle of the expander
+ *
+ */
+struct mptsas_portinfo *
+mptsas_expander_add(MPT_ADAPTER *ioc, u16 handle)
+{
+       struct mptsas_portinfo buffer, *port_info;
+       int i;
+
+       if ((mptsas_sas_expander_pg0(ioc, &buffer,
+           (MPI_SAS_EXPAND_PGAD_FORM_HANDLE <<
+           MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)))
+               return NULL;
+
+       port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_ATOMIC);
+       if (!port_info) {
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+               "%s: exit at line=%d\n", ioc->name,
+               __func__, __LINE__));
+               return NULL;
+       }
+       port_info->num_phys = buffer.num_phys;
+       port_info->phy_info = buffer.phy_info;
+       for (i = 0; i < port_info->num_phys; i++)
+               port_info->phy_info[i].portinfo = port_info;
+       mutex_lock(&ioc->sas_topology_mutex);
+       list_add_tail(&port_info->list, &ioc->sas_topology);
+       mutex_unlock(&ioc->sas_topology_mutex);
+       printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+           "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+           (unsigned long long)buffer.phy_info[0].identify.sas_address);
+       mptsas_expander_refresh(ioc, port_info);
+       return port_info;
+}
+
+static void
+mptsas_send_link_status_event(struct fw_event_work *fw_event)
+{
+       MPT_ADAPTER *ioc;
+       MpiEventDataSasPhyLinkStatus_t *link_data;
+       struct mptsas_portinfo *port_info;
+       struct mptsas_phyinfo *phy_info = NULL;
+       __le64 sas_address;
+       u8 phy_num;
+       u8 link_rate;
+
+       ioc = fw_event->ioc;
+       link_data = (MpiEventDataSasPhyLinkStatus_t *)fw_event->event_data;
+
+       memcpy(&sas_address, &link_data->SASAddress, sizeof(__le64));
+       sas_address = le64_to_cpu(sas_address);
+       link_rate = link_data->LinkRates >> 4;
+       phy_num = link_data->PhyNum;
+
+       port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
+       if (port_info) {
+               phy_info = &port_info->phy_info[phy_num];
+               if (phy_info)
+                       phy_info->negotiated_link_rate = link_rate;
+       }
+
+       if (link_rate == MPI_SAS_IOUNIT0_RATE_1_5 ||
+           link_rate == MPI_SAS_IOUNIT0_RATE_3_0) {
+
+               if (!port_info) {
+                       if (ioc->old_sas_discovery_protocal) {
+                               port_info = mptsas_expander_add(ioc,
+                                       le16_to_cpu(link_data->DevHandle));
+                               if (port_info)
+                                       goto out;
+                       }
+                       goto out;
+               }
+
+               if (port_info == ioc->hba_port_info)
+                       mptsas_probe_hba_phys(ioc);
+               else
+                       mptsas_expander_refresh(ioc, port_info);
+       } else if (phy_info && phy_info->phy) {
+               if (link_rate ==  MPI_SAS_IOUNIT0_RATE_PHY_DISABLED)
+                       phy_info->phy->negotiated_linkrate =
+                           SAS_PHY_DISABLED;
+               else if (link_rate ==
+                   MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION)
+                       phy_info->phy->negotiated_linkrate =
+                           SAS_LINK_RATE_FAILED;
+               else
+                       phy_info->phy->negotiated_linkrate =
+                           SAS_LINK_RATE_UNKNOWN;
+       }
+ out:
+       mptsas_free_fw_event(ioc, fw_event);
+}
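
Only the two operational rates trigger a re-probe; the other link states are folded straight into the transport phy. The mapping implemented above, condensed (the >> 4 picks out the upper nibble of LinkRates, which this handler treats as the operative rate):

	switch (link_data->LinkRates >> 4) {
	case MPI_SAS_IOUNIT0_RATE_1_5:
	case MPI_SAS_IOUNIT0_RATE_3_0:
		/* link up: re-probe the HBA phys or refresh the expander */
		break;
	case MPI_SAS_IOUNIT0_RATE_PHY_DISABLED:
		phy_info->phy->negotiated_linkrate = SAS_PHY_DISABLED;
		break;
	case MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION:
		phy_info->phy->negotiated_linkrate = SAS_LINK_RATE_FAILED;
		break;
	default:
		phy_info->phy->negotiated_linkrate = SAS_LINK_RATE_UNKNOWN;
		break;
	}
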
+
+static void
+mptsas_not_responding_devices(MPT_ADAPTER *ioc)
+{
+       struct mptsas_portinfo buffer, *port_info;
+       struct mptsas_device_info       *sas_info;
+       struct mptsas_devinfo sas_device;
+       u32     handle;
+       VirtTarget *vtarget = NULL;
+       struct mptsas_phyinfo *phy_info;
+       u8 found_expander;
+       int retval, retry_count;
+       unsigned long flags;
+
+       mpt_findImVolumes(ioc);
+
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress) {
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                  "%s: exiting due to a parallel reset\n", ioc->name,
+                   __func__));
+               spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+               return;
+       }
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+
+       /* devices, logical volumes */
+       mutex_lock(&ioc->sas_device_info_mutex);
+ redo_device_scan:
+       list_for_each_entry(sas_info, &ioc->sas_device_info_list, list) {
+               if (sas_info->is_cached)
+                       continue;
+               if (!sas_info->is_logical_volume) {
+                       sas_device.handle = 0;
+                       retry_count = 0;
+retry_page:
+                       retval = mptsas_sas_device_pg0(ioc, &sas_device,
+                               (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID
+                               << MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+                               (sas_info->fw.channel << 8) +
+                               sas_info->fw.id);
+
+                       if (sas_device.handle)
+                               continue;
+                       if (retval == -EBUSY) {
+                               spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+                               if (ioc->ioc_reset_in_progress) {
+                                       dfailprintk(ioc,
+                                       printk(MYIOC_s_DEBUG_FMT
+                                       "%s: exiting due to reset\n",
+                                       ioc->name, __func__));
+                                       spin_unlock_irqrestore
+                                       (&ioc->taskmgmt_lock, flags);
+                                       mutex_unlock(&ioc->
+                                       sas_device_info_mutex);
+                                       return;
+                               }
+                               spin_unlock_irqrestore(&ioc->taskmgmt_lock,
+                               flags);
+                       }
+
+                       if (retval && (retval != -ENODEV)) {
+                               if (retry_count < 10) {
+                                       retry_count++;
+                                       goto retry_page;
+                               } else {
+                                       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                                       "%s: config page retry count "
+                                       "exceeded, deleting device 0x%llx\n",
+                                       ioc->name, __func__,
+                                       sas_info->sas_address));
+                               }
+                       }
+
+                       /* delete device */
+                       vtarget = mptsas_find_vtarget(ioc,
+                               sas_info->fw.channel, sas_info->fw.id);
+
+                       if (vtarget)
+                               vtarget->deleted = 1;
+
+                       phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+                                       sas_info->sas_address);
+
+                       if (phy_info) {
+                               mptsas_del_end_device(ioc, phy_info);
+                               goto redo_device_scan;
+                       }
+               } else
+                       mptsas_volume_delete(ioc, sas_info->fw.id);
+       }
+       mutex_unlock(&ioc->sas_device_info_mutex);
+
+       /* expanders */
+       mutex_lock(&ioc->sas_topology_mutex);
+ redo_expander_scan:
+       list_for_each_entry(port_info, &ioc->sas_topology, list) {
+
+               if (port_info->phy_info &&
+                   (!(port_info->phy_info[0].identify.device_info &
+                   MPI_SAS_DEVICE_INFO_SMP_TARGET)))
+                       continue;
+               found_expander = 0;
+               handle = 0xFFFF;
+               while (!mptsas_sas_expander_pg0(ioc, &buffer,
+                   (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+                    MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle) &&
+                   !found_expander) {
+
+                       handle = buffer.phy_info[0].handle;
+                       if (buffer.phy_info[0].identify.sas_address ==
+                           port_info->phy_info[0].identify.sas_address) {
+                               found_expander = 1;
+                       }
+                       kfree(buffer.phy_info);
+               }
+
+               if (!found_expander) {
+                       mptsas_expander_delete(ioc, port_info, 0);
+                       goto redo_expander_scan;
+               }
+       }
+       mutex_unlock(&ioc->sas_topology_mutex);
+}
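
Both walks above restart from the list head after a deletion rather than using list_for_each_entry_safe(), because removing an end device or an expander can unhook more than the current entry. The pattern, reduced to a skeleton (gone() and delete_entry() are placeholders):

	redo_scan:
		list_for_each_entry(entry, &head, list) {
			if (gone(entry)) {
				delete_entry(entry);	/* may free several entries */
				goto redo_scan;		/* cursor is stale, start over */
			}
		}
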
+
+/**
+ *     mptsas_probe_expanders - probe and add expanders
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
+mptsas_probe_expanders(MPT_ADAPTER *ioc)
+{
+       struct mptsas_portinfo buffer, *port_info;
+       u32                     handle;
+       int i;
+
+       handle = 0xFFFF;
+       while (!mptsas_sas_expander_pg0(ioc, &buffer,
+           (MPI_SAS_EXPAND_PGAD_FORM_GET_NEXT_HANDLE <<
+            MPI_SAS_EXPAND_PGAD_FORM_SHIFT), handle)) {
+
+               handle = buffer.phy_info[0].handle;
+               port_info = mptsas_find_portinfo_by_sas_address(ioc,
+                   buffer.phy_info[0].identify.sas_address);
+
+               if (port_info) {
+                       /* refreshing handles */
+                       for (i = 0; i < buffer.num_phys; i++) {
+                               port_info->phy_info[i].handle = handle;
+                               port_info->phy_info[i].identify.handle_parent =
+                                   buffer.phy_info[0].identify.handle_parent;
+                       }
+                       mptsas_expander_refresh(ioc, port_info);
+                       kfree(buffer.phy_info);
+                       continue;
+               }
+
+               port_info = kzalloc(sizeof(struct mptsas_portinfo), GFP_KERNEL);
+               if (!port_info) {
+                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "%s: exit at line=%d\n", ioc->name,
+                       __func__, __LINE__));
+                       return;
+               }
+               port_info->num_phys = buffer.num_phys;
+               port_info->phy_info = buffer.phy_info;
+               for (i = 0; i < port_info->num_phys; i++)
+                       port_info->phy_info[i].portinfo = port_info;
+               mutex_lock(&ioc->sas_topology_mutex);
+               list_add_tail(&port_info->list, &ioc->sas_topology);
+               mutex_unlock(&ioc->sas_topology_mutex);
+               printk(MYIOC_s_INFO_FMT "add expander: num_phys %d, "
+                   "sas_addr (0x%llx)\n", ioc->name, port_info->num_phys,
+                   (unsigned long long)buffer.phy_info[0].identify.sas_address);
+               mptsas_expander_refresh(ioc, port_info);
+       }
+}
+
+static void
+mptsas_probe_devices(MPT_ADAPTER *ioc)
+{
+       u16 handle;
+       struct mptsas_devinfo sas_device;
+       struct mptsas_phyinfo *phy_info;
+
+       handle = 0xFFFF;
+       while (!(mptsas_sas_device_pg0(ioc, &sas_device,
+           MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle))) {
+
+               handle = sas_device.handle;
+
+               if ((sas_device.device_info &
+                    (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+                     MPI_SAS_DEVICE_INFO_STP_TARGET |
+                     MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0)
+                       continue;
+
+               phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
+               if (!phy_info)
+                       continue;
+
+               if (mptsas_get_rphy(phy_info))
+                       continue;
+
+               mptsas_add_end_device(ioc, phy_info);
+       }
+}
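
mptsas_probe_expanders() and mptsas_probe_devices() share the same firmware iteration idiom: seed the handle with 0xFFFF, let each successful page read return the next live handle, and stop on the first failed read. The skeleton:

	u16 handle = 0xFFFF;	/* "start of table" sentinel */

	while (!mptsas_sas_device_pg0(ioc, &sas_device,
	    MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE, handle)) {
		handle = sas_device.handle;	/* advance the cursor */
		/* examine sas_device here */
	}
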
+
+/**
+ *     mptsas_scan_sas_topology - scan the adapter's SAS topology
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *
+ **/
+static void
 mptsas_scan_sas_topology(MPT_ADAPTER *ioc)
 {
-       u32 handle = 0xFFFF;
+       struct scsi_device *sdev;
        int i;
 
-       mutex_lock(&ioc->sas_discovery_mutex);
        mptsas_probe_hba_phys(ioc);
-       while (!mptsas_probe_expander_phys(ioc, &handle))
-               ;
+       mptsas_probe_expanders(ioc);
+       mptsas_probe_devices(ioc);
+
        /*
          Reporting RAID volumes.
        */
-       if (!ioc->ir_firmware)
-               goto out;
-       if (!ioc->raid_data.pIocPg2)
-               goto out;
-       if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
-               goto out;
+       if (!ioc->ir_firmware || !ioc->raid_data.pIocPg2 ||
+           !ioc->raid_data.pIocPg2->NumActiveVolumes)
+               return;
        for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+               sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+                   ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
+               if (sdev) {
+                       scsi_device_put(sdev);
+                       continue;
+               }
+               printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+                   "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+                   ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID);
                scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
                    ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID, 0);
        }
- out:
-       mutex_unlock(&ioc->sas_discovery_mutex);
 }
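
The RAID loop now guards against attaching a volume twice: scsi_device_lookup() takes a reference that must be dropped either way.

	sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, volume_id, 0);
	if (sdev) {			/* already attached: drop the ref, skip */
		scsi_device_put(sdev);
		continue;
	}
	scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, volume_id, 0);

(volume_id stands in for ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID.)
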
 
-/*
- * Work queue thread to handle Runtime discovery
- * Mere purpose is the hot add/delete of expanders
- *(Mutex UNLOCKED)
- */
+
 static void
-__mptsas_discovery_work(MPT_ADAPTER *ioc)
+mptsas_handle_queue_full_event(struct fw_event_work *fw_event)
 {
-       u32 handle = 0xFFFF;
+       MPT_ADAPTER *ioc;
+       EventDataQueueFull_t *qfull_data;
+       struct mptsas_device_info *sas_info;
+       struct scsi_device      *sdev;
+       int depth;
+       int id = -1;
+       int channel = -1;
+       int fw_id, fw_channel;
+       u16 current_depth;
+
+
+       ioc = fw_event->ioc;
+       qfull_data = (EventDataQueueFull_t *)fw_event->event_data;
+       fw_id = qfull_data->TargetID;
+       fw_channel = qfull_data->Bus;
+       current_depth = le16_to_cpu(qfull_data->CurrentDepth);
+
+       /* if hidden raid component, look for the volume id */
+       mutex_lock(&ioc->sas_device_info_mutex);
+       if (mptscsih_is_phys_disk(ioc, fw_channel, fw_id)) {
+               list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+                   list) {
+                       if (sas_info->is_cached ||
+                           sas_info->is_logical_volume)
+                               continue;
+                       if (sas_info->is_hidden_raid_component &&
+                           (sas_info->fw.channel == fw_channel &&
+                           sas_info->fw.id == fw_id)) {
+                               id = sas_info->volume_id;
+                               channel = MPTSAS_RAID_CHANNEL;
+                               goto out;
+                       }
+               }
+       } else {
+               list_for_each_entry(sas_info, &ioc->sas_device_info_list,
+                   list) {
+                       if (sas_info->is_cached ||
+                           sas_info->is_hidden_raid_component ||
+                           sas_info->is_logical_volume)
+                               continue;
+                       if (sas_info->fw.channel == fw_channel &&
+                           sas_info->fw.id == fw_id) {
+                               id = sas_info->os.id;
+                               channel = sas_info->os.channel;
+                               goto out;
+                       }
+               }
 
-       ioc->sas_discovery_runtime=1;
-       mptsas_delete_expander_phys(ioc);
-       mptsas_probe_hba_phys(ioc);
-       while (!mptsas_probe_expander_phys(ioc, &handle))
-               ;
-       ioc->sas_discovery_runtime=0;
-}
+       }
 
-/*
- * Work queue thread to handle Runtime discovery
- * Mere purpose is the hot add/delete of expanders
- *(Mutex LOCKED)
- */
-static void
-mptsas_discovery_work(struct work_struct *work)
-{
-       struct mptsas_discovery_event *ev =
-               container_of(work, struct mptsas_discovery_event, work);
-       MPT_ADAPTER *ioc = ev->ioc;
+ out:
+       mutex_unlock(&ioc->sas_device_info_mutex);
+
+       if (id != -1) {
+               shost_for_each_device(sdev, ioc->sh) {
+                       if (sdev->id == id && sdev->channel == channel) {
+                               if (current_depth > sdev->queue_depth) {
+                                       sdev_printk(KERN_INFO, sdev,
+                                           "strange observation: queue depth "
+                                           "(%d) is less than fw queue full "
+                                           "depth (%d)\n", sdev->queue_depth,
+                                           current_depth);
+                                       continue;
+                               }
+                               depth = scsi_track_queue_full(sdev,
+                                   current_depth - 1);
+                               if (depth > 0)
+                                       sdev_printk(KERN_INFO, sdev,
+                                       "Queue depth reduced to (%d)\n",
+                                          depth);
+                               else if (depth < 0)
+                                       sdev_printk(KERN_INFO, sdev,
+                                       "Tagged Command Queueing is being "
+                                       "disabled\n");
+                               else if (depth == 0)
+                                       sdev_printk(KERN_INFO, sdev,
+                                       "Queue depth not changed yet\n");
+                       }
+               }
+       }
 
-       mutex_lock(&ioc->sas_discovery_mutex);
-       __mptsas_discovery_work(ioc);
-       mutex_unlock(&ioc->sas_discovery_mutex);
-       kfree(ev);
+       mptsas_free_fw_event(ioc, fw_event);
 }
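
scsi_track_queue_full() only lowers the depth once it has seen enough QUEUE_FULL events inside its sampling window, so the zero return ("not changed yet") is a normal outcome here. The contract, as the handler above consumes it:

	int depth = scsi_track_queue_full(sdev, current_depth - 1);

	if (depth > 0)		/* new, reduced queue depth */
		sdev_printk(KERN_INFO, sdev, "Queue depth reduced to (%d)\n", depth);
	else if (depth < 0)	/* tagged queueing switched off */
		sdev_printk(KERN_INFO, sdev, "Tagged Command Queueing is being disabled\n");
	else			/* no change made this time */
		sdev_printk(KERN_INFO, sdev, "Queue depth not changed yet\n");
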
 
+
 static struct mptsas_phyinfo *
 mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
 {
@@ -2429,69 +3786,80 @@ mptsas_find_phyinfo_by_sas_address(MPT_ADAPTER *ioc, u64 sas_address)
        return phy_info;
 }
 
+/**
+ *     mptsas_find_phyinfo_by_phys_disk_num - find the phy for a RAID member
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @phys_disk_num: phys disk number of the RAID component
+ *     @channel: fw mapped channel of the device
+ *     @id: fw mapped id of the device
+ *
+ **/
 static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_target(MPT_ADAPTER *ioc, u8 channel, u8 id)
+mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 phys_disk_num,
+       u8 channel, u8 id)
 {
-       struct mptsas_portinfo *port_info;
        struct mptsas_phyinfo *phy_info = NULL;
+       struct mptsas_portinfo *port_info;
+       RaidPhysDiskPage1_t *phys_disk = NULL;
+       int num_paths;
+       u64 sas_address = 0;
        int i;
 
-       mutex_lock(&ioc->sas_topology_mutex);
-       list_for_each_entry(port_info, &ioc->sas_topology, list) {
-               for (i = 0; i < port_info->num_phys; i++) {
-                       if (!mptsas_is_end_device(
-                               &port_info->phy_info[i].attached))
-                               continue;
-                       if (port_info->phy_info[i].attached.id != id)
-                               continue;
-                       if (port_info->phy_info[i].attached.channel != channel)
-                               continue;
-                       phy_info = &port_info->phy_info[i];
-                       break;
+       phy_info = NULL;
+       if (!ioc->raid_data.pIocPg3)
+               return NULL;
+       /* dual port support */
+       num_paths = mpt_raid_phys_disk_get_num_paths(ioc, phys_disk_num);
+       if (!num_paths)
+               goto out;
+       phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+          (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+       if (!phys_disk)
+               goto out;
+       mpt_raid_phys_disk_pg1(ioc, phys_disk_num, phys_disk);
+       for (i = 0; i < num_paths; i++) {
+               if ((phys_disk->Path[i].Flags & 1) != 0)
+                       /* entry no longer valid */
+                       continue;
+               if ((id == phys_disk->Path[i].PhysDiskID) &&
+                   (channel == phys_disk->Path[i].PhysDiskBus)) {
+                       memcpy(&sas_address, &phys_disk->Path[i].WWID,
+                               sizeof(u64));
+                       phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+                                       sas_address);
+                       goto out;
                }
        }
-       mutex_unlock(&ioc->sas_topology_mutex);
-       return phy_info;
-}
 
-static struct mptsas_phyinfo *
-mptsas_find_phyinfo_by_phys_disk_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
-{
-       struct mptsas_portinfo *port_info;
-       struct mptsas_phyinfo *phy_info = NULL;
-       int i;
+ out:
+       kfree(phys_disk);
+       if (phy_info)
+               return phy_info;
 
+       /*
+        * Extra code to handle RAID0 case, where the sas_address is not updated
+        * in phys_disk_page_1 when hotswapped
+        */
        mutex_lock(&ioc->sas_topology_mutex);
        list_for_each_entry(port_info, &ioc->sas_topology, list) {
-               for (i = 0; i < port_info->num_phys; i++) {
+               for (i = 0; i < port_info->num_phys && !phy_info; i++) {
                        if (!mptsas_is_end_device(
                                &port_info->phy_info[i].attached))
                                continue;
                        if (port_info->phy_info[i].attached.phys_disk_num == ~0)
                                continue;
-                       if (port_info->phy_info[i].attached.phys_disk_num != id)
-                               continue;
-                       if (port_info->phy_info[i].attached.channel != channel)
-                               continue;
-                       phy_info = &port_info->phy_info[i];
-                       break;
+                       if ((port_info->phy_info[i].attached.phys_disk_num ==
+                           phys_disk_num) &&
+                           (port_info->phy_info[i].attached.id == id) &&
+                           (port_info->phy_info[i].attached.channel ==
+                            channel))
+                               phy_info = &port_info->phy_info[i];
                }
        }
        mutex_unlock(&ioc->sas_topology_mutex);
        return phy_info;
 }
 
-/*
- * Work queue thread to clear the persitency table
- */
-static void
-mptsas_persist_clear_table(struct work_struct *work)
-{
-       MPT_ADAPTER *ioc = container_of(work, MPT_ADAPTER, sas_persist_task);
-
-       mptbase_sas_persist_operation(ioc, MPI_SAS_OP_CLEAR_NOT_PRESENT);
-}
-
 static void
 mptsas_reprobe_lun(struct scsi_device *sdev, void *data)
 {
@@ -2517,7 +3885,8 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
        pRaidVolumePage0_t              buffer = NULL;
        RaidPhysDiskPage0_t             phys_disk;
        int                             i;
-       struct mptsas_hotplug_event     *ev;
+       struct mptsas_phyinfo   *phy_info;
+       struct mptsas_devinfo           sas_device;
 
        memset(&cfg, 0 , sizeof(CONFIGPARMS));
        memset(&hdr, 0 , sizeof(ConfigPageHeader_t));
@@ -2557,20 +3926,16 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
                    buffer->PhysDisk[i].PhysDiskNum, &phys_disk) != 0)
                        continue;
 
-               ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-               if (!ev) {
-                       printk(MYIOC_s_WARN_FMT "mptsas: lost hotplug event\n", ioc->name);
-                       goto out;
-               }
+               if (mptsas_sas_device_pg0(ioc, &sas_device,
+                   (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+                    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+                       (phys_disk.PhysDiskBus << 8) +
+                       phys_disk.PhysDiskID))
+                       continue;
 
-               INIT_WORK(&ev->work, mptsas_hotplug_work);
-               ev->ioc = ioc;
-               ev->id = phys_disk.PhysDiskID;
-               ev->channel = phys_disk.PhysDiskBus;
-               ev->phys_disk_num_valid = 1;
-               ev->phys_disk_num = phys_disk.PhysDiskNum;
-               ev->event_type = MPTSAS_ADD_DEVICE;
-               schedule_work(&ev->work);
+               phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+                   sas_device.sas_address);
+               mptsas_add_end_device(ioc, phy_info);
        }
 
  out:
@@ -2582,417 +3947,386 @@ mptsas_adding_inactive_raid_components(MPT_ADAPTER *ioc, u8 channel, u8 id)
  * Work queue thread to handle SAS hotplug events
  */
 static void
-mptsas_hotplug_work(struct work_struct *work)
+mptsas_hotplug_work(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
+    struct mptsas_hotplug_event *hot_plug_info)
 {
-       struct mptsas_hotplug_event *ev =
-               container_of(work, struct mptsas_hotplug_event, work);
-
-       MPT_ADAPTER *ioc = ev->ioc;
        struct mptsas_phyinfo *phy_info;
-       struct sas_rphy *rphy;
-       struct sas_port *port;
-       struct scsi_device *sdev;
        struct scsi_target * starget;
-       struct sas_identify identify;
-       char *ds = NULL;
        struct mptsas_devinfo sas_device;
        VirtTarget *vtarget;
-       VirtDevice *vdevice;
+       int i;
+
+       switch (hot_plug_info->event_type) {
+
+       case MPTSAS_ADD_PHYSDISK:
+
+               if (!ioc->raid_data.pIocPg2)
+                       break;
+
+               for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++) {
+                       if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID ==
+                           hot_plug_info->id) {
+                               printk(MYIOC_s_WARN_FMT "firmware bug: unable "
+                                   "to add hidden disk - target_id matches "
+                                   "volume_id\n", ioc->name);
+                               mptsas_free_fw_event(ioc, fw_event);
+                               return;
+                       }
+               }
+               mpt_findImVolumes(ioc);
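+               /* fall through: a hidden RAID member is added like a plain device */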
+
+       case MPTSAS_ADD_DEVICE:
+               memset(&sas_device, 0, sizeof(struct mptsas_devinfo));
+               mptsas_sas_device_pg0(ioc, &sas_device,
+                   (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+                   MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+                   (hot_plug_info->channel << 8) +
+                   hot_plug_info->id);
+
+               if (!sas_device.handle)
+                       return;
+
+               phy_info = mptsas_refreshing_device_handles(ioc, &sas_device);
+               if (!phy_info)
+                       break;
+
+               if (mptsas_get_rphy(phy_info))
+                       break;
+
+               mptsas_add_end_device(ioc, phy_info);
+               break;
 
-       mutex_lock(&ioc->sas_discovery_mutex);
-       switch (ev->event_type) {
        case MPTSAS_DEL_DEVICE:
+               phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
+                   hot_plug_info->sas_address);
+               mptsas_del_end_device(ioc, phy_info);
+               break;
 
-               phy_info = NULL;
-               if (ev->phys_disk_num_valid) {
-                       if (ev->hidden_raid_component){
-                               if (mptsas_sas_device_pg0(ioc, &sas_device,
-                                   (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
-                                    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-                                   (ev->channel << 8) + ev->id)) {
-                                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                                       "%s: exit at line=%d\n", ioc->name,
-                                               __func__, __LINE__));
-                                       break;
-                               }
-                               phy_info = mptsas_find_phyinfo_by_sas_address(
-                                   ioc, sas_device.sas_address);
-                       }else
-                               phy_info = mptsas_find_phyinfo_by_phys_disk_num(
-                                   ioc, ev->channel, ev->phys_disk_num);
-               }
+       case MPTSAS_DEL_PHYSDISK:
 
-               if (!phy_info)
-                       phy_info = mptsas_find_phyinfo_by_target(ioc,
-                           ev->channel, ev->id);
+               mpt_findImVolumes(ioc);
 
-               /*
-                * Sanity checks, for non-existing phys and remote rphys.
-                */
-               if (!phy_info){
+               phy_info = mptsas_find_phyinfo_by_phys_disk_num(
+                               ioc, hot_plug_info->phys_disk_num,
+                               hot_plug_info->channel,
+                               hot_plug_info->id);
+               mptsas_del_end_device(ioc, phy_info);
+               break;
+
+       case MPTSAS_ADD_PHYSDISK_REPROBE:
+
+               if (mptsas_sas_device_pg0(ioc, &sas_device,
+                   (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
+                    MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
+                   (hot_plug_info->channel << 8) + hot_plug_info->id)) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
+                       "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                                __func__, hot_plug_info->id, __LINE__));
                        break;
                }
-               if (!phy_info->port_details) {
+
+               phy_info = mptsas_find_phyinfo_by_sas_address(
+                   ioc, sas_device.sas_address);
+
+               if (!phy_info) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
+                               "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                                __func__, hot_plug_info->id, __LINE__));
                        break;
                }
-               rphy = mptsas_get_rphy(phy_info);
-               if (!rphy) {
+
+               starget = mptsas_get_starget(phy_info);
+               if (!starget) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
+                               "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                                __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 
-               port = mptsas_get_port(phy_info);
-               if (!port) {
+               vtarget = starget->hostdata;
+               if (!vtarget) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
+                               "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                                __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 
-               starget = mptsas_get_starget(phy_info);
-               if (starget) {
-                       vtarget = starget->hostdata;
-
-                       if (!vtarget) {
-                               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                                       "%s: exit at line=%d\n", ioc->name,
-                                       __func__, __LINE__));
-                               break;
-                       }
+               mpt_findImVolumes(ioc);
 
-                       /*
-                        * Handling  RAID components
-                        */
-                       if (ev->phys_disk_num_valid &&
-                           ev->hidden_raid_component) {
-                               printk(MYIOC_s_INFO_FMT
-                                   "RAID Hidding: channel=%d, id=%d, "
-                                   "physdsk %d \n", ioc->name, ev->channel,
-                                   ev->id, ev->phys_disk_num);
-                               vtarget->id = ev->phys_disk_num;
-                               vtarget->tflags |=
-                                   MPT_TARGET_FLAGS_RAID_COMPONENT;
-                               mptsas_reprobe_target(starget, 1);
-                               phy_info->attached.phys_disk_num =
-                                   ev->phys_disk_num;
-                       break;
-                       }
-               }
+               starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Hiding: "
+                   "fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+                   ioc->name, hot_plug_info->channel, hot_plug_info->id,
+                   hot_plug_info->phys_disk_num, (unsigned long long)
+                   sas_device.sas_address);
 
-               if (phy_info->attached.device_info &
-                   MPI_SAS_DEVICE_INFO_SSP_TARGET)
-                       ds = "ssp";
-               if (phy_info->attached.device_info &
-                   MPI_SAS_DEVICE_INFO_STP_TARGET)
-                       ds = "stp";
-               if (phy_info->attached.device_info &
-                   MPI_SAS_DEVICE_INFO_SATA_DEVICE)
-                       ds = "sata";
-
-               printk(MYIOC_s_INFO_FMT
-                      "removing %s device, channel %d, id %d, phy %d\n",
-                      ioc->name, ds, ev->channel, ev->id, phy_info->phy_id);
-               dev_printk(KERN_DEBUG, &port->dev, MYIOC_s_FMT
-                   "delete port (%d)\n", ioc->name, port->port_identifier);
-               sas_port_delete(port);
-               mptsas_port_delete(ioc, phy_info->port_details);
+               vtarget->id = hot_plug_info->phys_disk_num;
+               vtarget->tflags |= MPT_TARGET_FLAGS_RAID_COMPONENT;
+               phy_info->attached.phys_disk_num = hot_plug_info->phys_disk_num;
+               mptsas_reprobe_target(starget, 1);
                break;
-       case MPTSAS_ADD_DEVICE:
 
-               if (ev->phys_disk_num_valid)
-                       mpt_findImVolumes(ioc);
+       case MPTSAS_DEL_PHYSDISK_REPROBE:
 
-               /*
-                * Refresh sas device pg0 data
-                */
                if (mptsas_sas_device_pg0(ioc, &sas_device,
                    (MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID <<
                     MPI_SAS_DEVICE_PGAD_FORM_SHIFT),
-                       (ev->channel << 8) + ev->id)) {
+                       (hot_plug_info->channel << 8) + hot_plug_info->id)) {
                                dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                                       "%s: exit at line=%d\n", ioc->name,
-                                       __func__, __LINE__));
+                                   "%s: fw_id=%d exit at line=%d\n",
+                                   ioc->name, __func__,
+                                   hot_plug_info->id, __LINE__));
                        break;
                }
 
-               __mptsas_discovery_work(ioc);
-
                phy_info = mptsas_find_phyinfo_by_sas_address(ioc,
                                sas_device.sas_address);
-
-               if (!phy_info || !phy_info->port_details) {
+               if (!phy_info) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
+                           "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 
                starget = mptsas_get_starget(phy_info);
-               if (starget && (!ev->hidden_raid_component)){
-
-                       vtarget = starget->hostdata;
-
-                       if (!vtarget) {
-                               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                                   "%s: exit at line=%d\n", ioc->name,
-                                   __func__, __LINE__));
-                               break;
-                       }
-                       /*
-                        * Handling  RAID components
-                        */
-                       if (vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT) {
-                               printk(MYIOC_s_INFO_FMT
-                                   "RAID Exposing: channel=%d, id=%d, "
-                                   "physdsk %d \n", ioc->name, ev->channel,
-                                   ev->id, ev->phys_disk_num);
-                               vtarget->tflags &=
-                                   ~MPT_TARGET_FLAGS_RAID_COMPONENT;
-                               vtarget->id = ev->id;
-                               mptsas_reprobe_target(starget, 0);
-                               phy_info->attached.phys_disk_num = ~0;
-                       }
+               if (!starget) {
+                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                           "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 
-               if (mptsas_get_rphy(phy_info)) {
+               vtarget = starget->hostdata;
+               if (!vtarget) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
-                       if (ev->channel) printk("%d\n", __LINE__);
+                           "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, hot_plug_info->id, __LINE__));
                        break;
                }
 
-               port = mptsas_get_port(phy_info);
-               if (!port) {
+               if (!(vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)) {
                        dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
+                           "%s: fw_id=%d exit at line=%d\n", ioc->name,
+                        __func__, hot_plug_info->id, __LINE__));
                        break;
                }
-               memcpy(&phy_info->attached, &sas_device,
-                   sizeof(struct mptsas_devinfo));
-
-               if (phy_info->attached.device_info &
-                   MPI_SAS_DEVICE_INFO_SSP_TARGET)
-                       ds = "ssp";
-               if (phy_info->attached.device_info &
-                   MPI_SAS_DEVICE_INFO_STP_TARGET)
-                       ds = "stp";
-               if (phy_info->attached.device_info &
-                   MPI_SAS_DEVICE_INFO_SATA_DEVICE)
-                       ds = "sata";
-
-               printk(MYIOC_s_INFO_FMT
-                      "attaching %s device, channel %d, id %d, phy %d\n",
-                      ioc->name, ds, ev->channel, ev->id, ev->phy_id);
 
-               mptsas_parse_device_info(&identify, &phy_info->attached);
-               rphy = sas_end_device_alloc(port);
-               if (!rphy) {
-                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
-                       break; /* non-fatal: an rphy can be added later */
-               }
+               mpt_findImVolumes(ioc);
 
-               rphy->identify = identify;
-               if (sas_rphy_add(rphy)) {
-                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
-                               "%s: exit at line=%d\n", ioc->name,
-                               __func__, __LINE__));
-                       sas_rphy_free(rphy);
-                       break;
-               }
-               mptsas_set_rphy(ioc, phy_info, rphy);
+               starget_printk(KERN_INFO, starget, MYIOC_s_FMT "RAID Exposing:"
+                   " fw_channel=%d, fw_id=%d, physdsk %d, sas_addr 0x%llx\n",
+                   ioc->name, hot_plug_info->channel, hot_plug_info->id,
+                   hot_plug_info->phys_disk_num, (unsigned long long)
+                   sas_device.sas_address);
+
+               vtarget->tflags &= ~MPT_TARGET_FLAGS_RAID_COMPONENT;
+               vtarget->id = hot_plug_info->id;
+               phy_info->attached.phys_disk_num = ~0;
+               mptsas_reprobe_target(starget, 0);
+               mptsas_add_device_component_by_fw(ioc,
+                   hot_plug_info->channel, hot_plug_info->id);
                break;
+
        case MPTSAS_ADD_RAID:
-               sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
-                   ev->id, 0);
-               if (sdev) {
-                       scsi_device_put(sdev);
-                       break;
-               }
-               printk(MYIOC_s_INFO_FMT
-                      "attaching raid volume, channel %d, id %d\n",
-                      ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
-               scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL, ev->id, 0);
+
                mpt_findImVolumes(ioc);
+               printk(MYIOC_s_INFO_FMT "attaching raid volume, channel %d, "
+                   "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+                   hot_plug_info->id);
+               scsi_add_device(ioc->sh, MPTSAS_RAID_CHANNEL,
+                   hot_plug_info->id, 0);
                break;
+
        case MPTSAS_DEL_RAID:
-               sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
-                   ev->id, 0);
-               if (!sdev)
-                       break;
-               printk(MYIOC_s_INFO_FMT
-                      "removing raid volume, channel %d, id %d\n",
-                      ioc->name, MPTSAS_RAID_CHANNEL, ev->id);
-               vdevice = sdev->hostdata;
-               scsi_remove_device(sdev);
-               scsi_device_put(sdev);
+
                mpt_findImVolumes(ioc);
+               printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+                   "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL,
+                   hot_plug_info->id);
+               scsi_remove_device(hot_plug_info->sdev);
+               scsi_device_put(hot_plug_info->sdev);
                break;
+
        case MPTSAS_ADD_INACTIVE_VOLUME:
+
+               mpt_findImVolumes(ioc);
                mptsas_adding_inactive_raid_components(ioc,
-                   ev->channel, ev->id);
+                   hot_plug_info->channel, hot_plug_info->id);
                break;
-       case MPTSAS_IGNORE_EVENT:
+
        default:
                break;
        }
 
-       mutex_unlock(&ioc->sas_discovery_mutex);
-       kfree(ev);
+       mptsas_free_fw_event(ioc, fw_event);
 }
 
 static void
-mptsas_send_sas_event(MPT_ADAPTER *ioc,
-               EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data)
+mptsas_send_sas_event(struct fw_event_work *fw_event)
 {
-       struct mptsas_hotplug_event *ev;
-       u32 device_info = le32_to_cpu(sas_event_data->DeviceInfo);
-       __le64 sas_address;
+       MPT_ADAPTER *ioc;
+       struct mptsas_hotplug_event hot_plug_info;
+       EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data;
+       u32 device_info;
+       u64 sas_address;
+
+       ioc = fw_event->ioc;
+       sas_event_data = (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)
+           fw_event->event_data;
+       device_info = le32_to_cpu(sas_event_data->DeviceInfo);
 
        if ((device_info &
-            (MPI_SAS_DEVICE_INFO_SSP_TARGET |
-             MPI_SAS_DEVICE_INFO_STP_TARGET |
-             MPI_SAS_DEVICE_INFO_SATA_DEVICE )) == 0)
+               (MPI_SAS_DEVICE_INFO_SSP_TARGET |
+               MPI_SAS_DEVICE_INFO_STP_TARGET |
+               MPI_SAS_DEVICE_INFO_SATA_DEVICE)) == 0) {
+               mptsas_free_fw_event(ioc, fw_event);
+               return;
+       }
+
+       if (sas_event_data->ReasonCode ==
+               MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED) {
+               mptbase_sas_persist_operation(ioc,
+               MPI_SAS_OP_CLEAR_NOT_PRESENT);
+               mptsas_free_fw_event(ioc, fw_event);
                return;
+       }
 
        switch (sas_event_data->ReasonCode) {
        case MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING:
-
-               mptsas_target_reset_queue(ioc, sas_event_data);
-               break;
-
        case MPI_EVENT_SAS_DEV_STAT_RC_ADDED:
-               ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-               if (!ev) {
-                       printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
-                       break;
-               }
-
-               INIT_WORK(&ev->work, mptsas_hotplug_work);
-               ev->ioc = ioc;
-               ev->handle = le16_to_cpu(sas_event_data->DevHandle);
-               ev->parent_handle =
-                   le16_to_cpu(sas_event_data->ParentDevHandle);
-               ev->channel = sas_event_data->Bus;
-               ev->id = sas_event_data->TargetID;
-               ev->phy_id = sas_event_data->PhyNum;
+               memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+               hot_plug_info.handle = le16_to_cpu(sas_event_data->DevHandle);
+               hot_plug_info.channel = sas_event_data->Bus;
+               hot_plug_info.id = sas_event_data->TargetID;
+               hot_plug_info.phy_id = sas_event_data->PhyNum;
                memcpy(&sas_address, &sas_event_data->SASAddress,
-                   sizeof(__le64));
-               ev->sas_address = le64_to_cpu(sas_address);
-               ev->device_info = device_info;
-
+                   sizeof(u64));
+               hot_plug_info.sas_address = le64_to_cpu(sas_address);
+               hot_plug_info.device_info = device_info;
                if (sas_event_data->ReasonCode &
                    MPI_EVENT_SAS_DEV_STAT_RC_ADDED)
-                       ev->event_type = MPTSAS_ADD_DEVICE;
+                       hot_plug_info.event_type = MPTSAS_ADD_DEVICE;
                else
-                       ev->event_type = MPTSAS_DEL_DEVICE;
-               schedule_work(&ev->work);
+                       hot_plug_info.event_type = MPTSAS_DEL_DEVICE;
+               mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
                break;
+
        case MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED:
-       /*
-        * Persistent table is full.
-        */
-               INIT_WORK(&ioc->sas_persist_task,
-                   mptsas_persist_clear_table);
-               schedule_work(&ioc->sas_persist_task);
+               mptbase_sas_persist_operation(ioc,
+                   MPI_SAS_OP_CLEAR_NOT_PRESENT);
+               mptsas_free_fw_event(ioc, fw_event);
                break;
-       /*
-        * TODO, handle other events
-        */
+
        case MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA:
-       case MPI_EVENT_SAS_DEV_STAT_RC_UNSUPPORTED:
+       /* TODO */
        case MPI_EVENT_SAS_DEV_STAT_RC_INTERNAL_DEVICE_RESET:
-       case MPI_EVENT_SAS_DEV_STAT_RC_TASK_ABORT_INTERNAL:
-       case MPI_EVENT_SAS_DEV_STAT_RC_ABORT_TASK_SET_INTERNAL:
-       case MPI_EVENT_SAS_DEV_STAT_RC_CLEAR_TASK_SET_INTERNAL:
-       case MPI_EVENT_SAS_DEV_STAT_RC_QUERY_TASK_INTERNAL:
+       /* TODO */
        default:
+               mptsas_free_fw_event(ioc, fw_event);
                break;
        }
 }
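
The memcpy into a local u64 ahead of le64_to_cpu() above is deliberate: SASAddress sits in a packed event payload with no alignment guarantee, and a direct 64-bit load can fault on strict-alignment machines. A runnable userspace sketch of the same decode, byte order assumed little-endian as on the MPI wire:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(void)
{
        /* Pretend event payload: the 8-byte SAS address starts at
         * offset 1, so it is deliberately misaligned. */
        uint8_t payload[9] = { 0x00, 0x78, 0x56, 0x34, 0x12,
                               0xef, 0xcd, 0xab, 0x90 };
        uint64_t raw, sas_address = 0;
        int i;

        /* memcpy is alignment-safe; *(uint64_t *)&payload[1] is not. */
        memcpy(&raw, &payload[1], sizeof(raw));

        /* Portable stand-in for le64_to_cpu(): interpret the copied
         * bytes as little-endian regardless of host byte order. */
        for (i = 7; i >= 0; i--)
                sas_address = (sas_address << 8) | ((uint8_t *)&raw)[i];

        printf("sas_addr 0x%llx\n", (unsigned long long)sas_address);
        /* prints: sas_addr 0x90abcdef12345678 */
        return 0;
}
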
+
 static void
-mptsas_send_raid_event(MPT_ADAPTER *ioc,
-               EVENT_DATA_RAID *raid_event_data)
+mptsas_send_raid_event(struct fw_event_work *fw_event)
 {
-       struct mptsas_hotplug_event *ev;
-       int status = le32_to_cpu(raid_event_data->SettingsStatus);
-       int state = (status >> 8) & 0xff;
-
-       if (ioc->bus_type != SAS)
-               return;
-
-       ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-       if (!ev) {
-               printk(MYIOC_s_WARN_FMT "lost hotplug event\n", ioc->name);
-               return;
+       MPT_ADAPTER *ioc;
+       EVENT_DATA_RAID *raid_event_data;
+       struct mptsas_hotplug_event hot_plug_info;
+       int status;
+       int state;
+       struct scsi_device *sdev = NULL;
+       VirtDevice *vdevice = NULL;
+       RaidPhysDiskPage0_t phys_disk;
+
+       ioc = fw_event->ioc;
+       raid_event_data = (EVENT_DATA_RAID *)fw_event->event_data;
+       status = le32_to_cpu(raid_event_data->SettingsStatus);
+       state = (status >> 8) & 0xff;
+
+       memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+       hot_plug_info.id = raid_event_data->VolumeID;
+       hot_plug_info.channel = raid_event_data->VolumeBus;
+       hot_plug_info.phys_disk_num = raid_event_data->PhysDiskNum;
+
+       if (raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_DELETED ||
+           raid_event_data->ReasonCode == MPI_EVENT_RAID_RC_VOLUME_CREATED ||
+           raid_event_data->ReasonCode ==
+           MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED) {
+               sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL,
+                   hot_plug_info.id, 0);
+               hot_plug_info.sdev = sdev;
+               if (sdev)
+                       vdevice = sdev->hostdata;
        }
 
-       INIT_WORK(&ev->work, mptsas_hotplug_work);
-       ev->ioc = ioc;
-       ev->id = raid_event_data->VolumeID;
-       ev->channel = raid_event_data->VolumeBus;
-       ev->event_type = MPTSAS_IGNORE_EVENT;
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+           "ReasonCode=%02x\n", ioc->name, __func__,
+           raid_event_data->ReasonCode));
 
        switch (raid_event_data->ReasonCode) {
        case MPI_EVENT_RAID_RC_PHYSDISK_DELETED:
-               ev->phys_disk_num_valid = 1;
-               ev->phys_disk_num = raid_event_data->PhysDiskNum;
-               ev->event_type = MPTSAS_ADD_DEVICE;
+               hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK_REPROBE;
                break;
        case MPI_EVENT_RAID_RC_PHYSDISK_CREATED:
-               ev->phys_disk_num_valid = 1;
-               ev->phys_disk_num = raid_event_data->PhysDiskNum;
-               ev->hidden_raid_component = 1;
-               ev->event_type = MPTSAS_DEL_DEVICE;
+               hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK_REPROBE;
                break;
        case MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED:
                switch (state) {
                case MPI_PD_STATE_ONLINE:
                case MPI_PD_STATE_NOT_COMPATIBLE:
-                       ev->phys_disk_num_valid = 1;
-                       ev->phys_disk_num = raid_event_data->PhysDiskNum;
-                       ev->hidden_raid_component = 1;
-                       ev->event_type = MPTSAS_ADD_DEVICE;
+                       mpt_raid_phys_disk_pg0(ioc,
+                           raid_event_data->PhysDiskNum, &phys_disk);
+                       hot_plug_info.id = phys_disk.PhysDiskID;
+                       hot_plug_info.channel = phys_disk.PhysDiskBus;
+                       hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
                        break;
+               case MPI_PD_STATE_FAILED:
                case MPI_PD_STATE_MISSING:
                case MPI_PD_STATE_OFFLINE_AT_HOST_REQUEST:
                case MPI_PD_STATE_FAILED_AT_HOST_REQUEST:
                case MPI_PD_STATE_OFFLINE_FOR_ANOTHER_REASON:
-                       ev->phys_disk_num_valid = 1;
-                       ev->phys_disk_num = raid_event_data->PhysDiskNum;
-                       ev->event_type = MPTSAS_DEL_DEVICE;
+                       hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
                        break;
                default:
                        break;
                }
                break;
        case MPI_EVENT_RAID_RC_VOLUME_DELETED:
-               ev->event_type = MPTSAS_DEL_RAID;
+               if (!sdev)
+                       break;
+               vdevice->vtarget->deleted = 1; /* block IO */
+               hot_plug_info.event_type = MPTSAS_DEL_RAID;
                break;
        case MPI_EVENT_RAID_RC_VOLUME_CREATED:
-               ev->event_type = MPTSAS_ADD_RAID;
+               if (sdev) {
+                       scsi_device_put(sdev);
+                       break;
+               }
+               hot_plug_info.event_type = MPTSAS_ADD_RAID;
                break;
        case MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED:
+               if (!(status & MPI_RAIDVOL0_STATUS_FLAG_ENABLED)) {
+                       if (!sdev)
+                               break;
+                       vdevice->vtarget->deleted = 1; /* block IO */
+                       hot_plug_info.event_type = MPTSAS_DEL_RAID;
+                       break;
+               }
                switch (state) {
                case MPI_RAIDVOL0_STATUS_STATE_FAILED:
                case MPI_RAIDVOL0_STATUS_STATE_MISSING:
-                       ev->event_type = MPTSAS_DEL_RAID;
+                       if (!sdev)
+                               break;
+                       vdevice->vtarget->deleted = 1; /* block IO */
+                       hot_plug_info.event_type = MPTSAS_DEL_RAID;
                        break;
                case MPI_RAIDVOL0_STATUS_STATE_OPTIMAL:
                case MPI_RAIDVOL0_STATUS_STATE_DEGRADED:
-                       ev->event_type = MPTSAS_ADD_RAID;
+                       if (sdev) {
+                               scsi_device_put(sdev);
+                               break;
+                       }
+                       hot_plug_info.event_type = MPTSAS_ADD_RAID;
                        break;
                default:
                        break;
@@ -3001,32 +4335,188 @@ mptsas_send_raid_event(MPT_ADAPTER *ioc,
        default:
                break;
        }
-       schedule_work(&ev->work);
+
+       if (hot_plug_info.event_type != MPTSAS_IGNORE_EVENT)
+               mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+       else
+               mptsas_free_fw_event(ioc, fw_event);
 }
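
A note on the bit twiddling above: the handler reads SettingsStatus once, derives state as (status >> 8) & 0xff, and applies flag tests such as MPI_RAIDVOL0_STATUS_FLAG_ENABLED to the low byte. A minimal standalone decode, with the layout assumed from that usage:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Assumed example word: flags 0x01 (enabled) in byte 0,
         * state 0x02 in byte 1; values are illustrative only. */
        uint32_t status = 0x0201;
        unsigned flags = status & 0xff;
        unsigned state = (status >> 8) & 0xff;

        printf("flags=0x%02x state=0x%02x\n", flags, state);
        return 0;
}
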
 
-static void
-mptsas_send_discovery_event(MPT_ADAPTER *ioc,
-       EVENT_DATA_SAS_DISCOVERY *discovery_data)
+/**
+ *     mptsas_issue_tm - send mptsas internal tm request
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @type: Task Management type
+ *     @channel: channel number for task management
+ *     @id: Logical Target ID for reset (if appropriate)
+ *     @lun: Logical unit for reset (if appropriate)
+ *     @task_context: Context for the task to be aborted
+ *     @timeout: timeout for task management control
+ *     @issue_reset: set to 1 when the caller needs to issue a hard reset
+ *
+ *     Return: 0 on success, -1 on failure.
+ */
+static int
+mptsas_issue_tm(MPT_ADAPTER *ioc, u8 type, u8 channel, u8 id, u64 lun,
+       int task_context, ulong timeout, u8 *issue_reset)
 {
-       struct mptsas_discovery_event *ev;
+       MPT_FRAME_HDR   *mf;
+       SCSITaskMgmt_t  *pScsiTm;
+       int              retval;
+       unsigned long    timeleft;
+
+       *issue_reset = 0;
+       mf = mpt_get_msg_frame(mptsasDeviceResetCtx, ioc);
+       if (mf == NULL) {
+               retval = -1; /* return failure */
+               dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "TaskMgmt request: no "
+                   "msg frames!!\n", ioc->name));
+               goto out;
+       }
 
-       /*
-        * DiscoveryStatus
-        *
-        * This flag will be non-zero when firmware
-        * kicks off discovery, and return to zero
-        * once its completed.
-        */
-       if (discovery_data->DiscoveryStatus)
-               return;
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request: mr = %p, "
+           "task_type = 0x%02X,\n\t timeout = %ld, fw_channel = %d, "
+           "fw_id = %d, lun = %lld,\n\t task_context = 0x%x\n", ioc->name, mf,
+            type, timeout, channel, id, (unsigned long long)lun,
+            task_context));
+
+       pScsiTm = (SCSITaskMgmt_t *) mf;
+       memset(pScsiTm, 0, sizeof(SCSITaskMgmt_t));
+       pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+       pScsiTm->TaskType = type;
+       pScsiTm->MsgFlags = 0;
+       pScsiTm->TargetID = id;
+       pScsiTm->Bus = channel;
+       pScsiTm->ChainOffset = 0;
+       pScsiTm->Reserved = 0;
+       pScsiTm->Reserved1 = 0;
+       pScsiTm->TaskMsgContext = task_context;
+       int_to_scsilun(lun, (struct scsi_lun *)pScsiTm->LUN);
+
+       INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+       CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+       retval = 0;
+       mpt_put_msg_frame_hi_pri(mptsasDeviceResetCtx, ioc, mf);
+
+       /* Now wait for the command to complete */
+       timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+           timeout*HZ);
+       if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               retval = -1; /* return failure */
+               dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+                   "TaskMgmt request: TIMED OUT!(mr=%p)\n", ioc->name, mf));
+               mpt_free_msg_frame(ioc, mf);
+               if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+                       goto out;
+               *issue_reset = 1;
+               goto out;
+       }
+
+       if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+               retval = -1; /* return failure */
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "TaskMgmt request: failed with no reply\n", ioc->name));
+               goto out;
+       }
+
+ out:
+       CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+       return retval;
+}
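
For orientation, a hypothetical caller of mptsas_issue_tm(), not taken from the patch: it assumes the same locking the broadcast handler below uses (taskmgmt_cmds.mutex plus the in-progress flag) and escalates to a hard reset when the firmware never answers.

/* Hedged sketch, kernel style: reset one target and escalate on
 * timeout.  example_target_reset() is a made-up name. */
static int example_target_reset(MPT_ADAPTER *ioc, u8 channel, u8 id)
{
        u8 issue_reset = 0;
        int rc = -1;

        mutex_lock(&ioc->taskmgmt_cmds.mutex);
        if (mpt_set_taskmgmt_in_progress_flag(ioc) == 0) {
                rc = mptsas_issue_tm(ioc,
                    MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
                    channel, id, 0 /* lun */, 0 /* task_context */,
                    30 /* seconds */, &issue_reset);
                mpt_clear_taskmgmt_in_progress_flag(ioc);
        }
        mutex_unlock(&ioc->taskmgmt_cmds.mutex);

        if (issue_reset)
                mpt_HardResetHandler(ioc, CAN_SLEEP);
        return rc;
}
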
 
-       ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-       if (!ev)
+/**
+ *     mptsas_broadcast_primative_work - Handle broadcast primitives
+ *     @fw_event: work queue payload containing info describing the event
+ *
+ *     This event is handled in workqueue context.
+ */
+static void
+mptsas_broadcast_primative_work(struct fw_event_work *fw_event)
+{
+       MPT_ADAPTER *ioc = fw_event->ioc;
+       MPT_FRAME_HDR   *mf;
+       VirtDevice      *vdevice;
+       int                     ii;
+       struct scsi_cmnd        *sc;
+       SCSITaskMgmtReply_t     *pScsiTmReply;
+       u8                      issue_reset;
+       int                     task_context;
+       u8                      channel, id;
+       int                      lun;
+       u32                      termination_count;
+       u32                      query_count;
+
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "%s - enter\n", ioc->name, __func__));
+
+       mutex_lock(&ioc->taskmgmt_cmds.mutex);
+       if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+               mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+               mptsas_requeue_fw_event(ioc, fw_event, 1000);
                return;
-       INIT_WORK(&ev->work, mptsas_discovery_work);
-       ev->ioc = ioc;
-       schedule_work(&ev->work);
-};
+       }
+
+       issue_reset = 0;
+       termination_count = 0;
+       query_count = 0;
+       mpt_findImVolumes(ioc);
+       pScsiTmReply = (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply;
+
+       for (ii = 0; ii < ioc->req_depth; ii++) {
+               if (ioc->fw_events_off)
+                       goto out;
+               sc = mptscsih_get_scsi_lookup(ioc, ii);
+               if (!sc)
+                       continue;
+               mf = MPT_INDEX_2_MFPTR(ioc, ii);
+               if (!mf)
+                       continue;
+               task_context = mf->u.frame.hwhdr.msgctxu.MsgContext;
+               vdevice = sc->device->hostdata;
+               if (!vdevice || !vdevice->vtarget)
+                       continue;
+               if (vdevice->vtarget->tflags & MPT_TARGET_FLAGS_RAID_COMPONENT)
+                       continue; /* skip hidden raid components */
+               if (vdevice->vtarget->raidVolume)
+                       continue; /* skip raid volumes */
+               channel = vdevice->vtarget->channel;
+               id = vdevice->vtarget->id;
+               lun = vdevice->lun;
+               if (mptsas_issue_tm(ioc, MPI_SCSITASKMGMT_TASKTYPE_QUERY_TASK,
+                   channel, id, (u64)lun, task_context, 30, &issue_reset))
+                       goto out;
+               query_count++;
+               termination_count +=
+                   le32_to_cpu(pScsiTmReply->TerminationCount);
+               if ((pScsiTmReply->IOCStatus == MPI_IOCSTATUS_SUCCESS) &&
+                   (pScsiTmReply->ResponseCode ==
+                   MPI_SCSITASKMGMT_RSP_TM_SUCCEEDED ||
+                   pScsiTmReply->ResponseCode ==
+                   MPI_SCSITASKMGMT_RSP_IO_QUEUED_ON_IOC))
+                       continue;
+               if (mptsas_issue_tm(ioc,
+                   MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET,
+                   channel, id, (u64)lun, 0, 30, &issue_reset))
+                       goto out;
+               termination_count +=
+                   le32_to_cpu(pScsiTmReply->TerminationCount);
+       }
+
+ out:
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "%s - exit, query_count = %d termination_count = %d\n",
+           ioc->name, __func__, query_count, termination_count));
+
+       ioc->broadcast_aen_busy = 0;
+       mpt_clear_taskmgmt_in_progress_flag(ioc);
+       mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+
+       if (issue_reset) {
+               printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+                   ioc->name, __func__);
+               mpt_HardResetHandler(ioc, CAN_SLEEP);
+       }
+       mptsas_free_fw_event(ioc, fw_event);
+}
 
 /*
  * mptsas_send_ir2_event - handle exposing hidden disk when
@@ -3037,76 +4527,159 @@ mptsas_send_discovery_event(MPT_ADAPTER *ioc,
  *
  */
 static void
-mptsas_send_ir2_event(MPT_ADAPTER *ioc, PTR_MPI_EVENT_DATA_IR2 ir2_data)
+mptsas_send_ir2_event(struct fw_event_work *fw_event)
 {
-       struct mptsas_hotplug_event *ev;
-
-       if (ir2_data->ReasonCode !=
-           MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED)
-               return;
-
-       ev = kzalloc(sizeof(*ev), GFP_ATOMIC);
-       if (!ev)
+       MPT_ADAPTER     *ioc;
+       struct mptsas_hotplug_event hot_plug_info;
+       MPI_EVENT_DATA_IR2      *ir2_data;
+       u8 reasonCode;
+       RaidPhysDiskPage0_t phys_disk;
+
+       ioc = fw_event->ioc;
+       ir2_data = (MPI_EVENT_DATA_IR2 *)fw_event->event_data;
+       reasonCode = ir2_data->ReasonCode;
+
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Entering %s: "
+           "ReasonCode=%02x\n", ioc->name, __func__, reasonCode));
+
+       memset(&hot_plug_info, 0, sizeof(struct mptsas_hotplug_event));
+       hot_plug_info.id = ir2_data->TargetID;
+       hot_plug_info.channel = ir2_data->Bus;
+       switch (reasonCode) {
+       case MPI_EVENT_IR2_RC_FOREIGN_CFG_DETECTED:
+               hot_plug_info.event_type = MPTSAS_ADD_INACTIVE_VOLUME;
+               break;
+       case MPI_EVENT_IR2_RC_DUAL_PORT_REMOVED:
+               hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+               hot_plug_info.event_type = MPTSAS_DEL_PHYSDISK;
+               break;
+       case MPI_EVENT_IR2_RC_DUAL_PORT_ADDED:
+               hot_plug_info.phys_disk_num = ir2_data->PhysDiskNum;
+               mpt_raid_phys_disk_pg0(ioc,
+                   ir2_data->PhysDiskNum, &phys_disk);
+               hot_plug_info.id = phys_disk.PhysDiskID;
+               hot_plug_info.event_type = MPTSAS_ADD_PHYSDISK;
+               break;
+       default:
+               mptsas_free_fw_event(ioc, fw_event);
                return;
-
-       INIT_WORK(&ev->work, mptsas_hotplug_work);
-       ev->ioc = ioc;
-       ev->id = ir2_data->TargetID;
-       ev->channel = ir2_data->Bus;
-       ev->event_type = MPTSAS_ADD_INACTIVE_VOLUME;
-
-       schedule_work(&ev->work);
-};
+       }
+       mptsas_hotplug_work(ioc, fw_event, &hot_plug_info);
+}
 
 static int
 mptsas_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *reply)
 {
-       int rc=1;
-       u8 event = le32_to_cpu(reply->Event) & 0xFF;
-
-       if (!ioc->sh)
-               goto out;
+       u32 event = le32_to_cpu(reply->Event);
+       int sz, event_data_sz;
+       struct fw_event_work *fw_event;
+       unsigned long delay;
 
-       /*
-        * sas_discovery_ignore_events
-        *
-        * This flag is to prevent anymore processing of
-        * sas events once mptsas_remove function is called.
-        */
-       if (ioc->sas_discovery_ignore_events) {
-               rc = mptscsih_event_process(ioc, reply);
-               goto out;
-       }
+       /* events turned off due to host reset or driver unloading */
+       if (ioc->fw_events_off)
+               return 0;
 
+       delay = msecs_to_jiffies(1);
        switch (event) {
+       case MPI_EVENT_SAS_BROADCAST_PRIMITIVE:
+       {
+               EVENT_DATA_SAS_BROADCAST_PRIMITIVE *broadcast_event_data =
+                   (EVENT_DATA_SAS_BROADCAST_PRIMITIVE *)reply->Data;
+               if (broadcast_event_data->Primitive !=
+                   MPI_EVENT_PRIMITIVE_ASYNCHRONOUS_EVENT)
+                       return 0;
+               if (ioc->broadcast_aen_busy)
+                       return 0;
+               ioc->broadcast_aen_busy = 1;
+               break;
+       }
        case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE:
-               mptsas_send_sas_event(ioc,
-                       (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data);
+       {
+               EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *sas_event_data =
+                   (EVENT_DATA_SAS_DEVICE_STATUS_CHANGE *)reply->Data;
+
+               if (sas_event_data->ReasonCode ==
+                   MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING) {
+                       mptsas_target_reset_queue(ioc, sas_event_data);
+                       return 0;
+               }
                break;
-       case MPI_EVENT_INTEGRATED_RAID:
-               mptsas_send_raid_event(ioc,
-                       (EVENT_DATA_RAID *)reply->Data);
+       }
+       case MPI_EVENT_SAS_EXPANDER_STATUS_CHANGE:
+       {
+               MpiEventDataSasExpanderStatusChange_t *expander_data =
+                   (MpiEventDataSasExpanderStatusChange_t *)reply->Data;
+
+               if (ioc->old_sas_discovery_protocal)
+                       return 0;
+
+               if (expander_data->ReasonCode ==
+                   MPI_EVENT_SAS_EXP_RC_NOT_RESPONDING &&
+                   ioc->device_missing_delay)
+                       delay = HZ * ioc->device_missing_delay;
                break;
+       }
+       case MPI_EVENT_SAS_DISCOVERY:
+       {
+               u32 discovery_status;
+               EventDataSasDiscovery_t *discovery_data =
+                   (EventDataSasDiscovery_t *)reply->Data;
+
+               discovery_status = le32_to_cpu(discovery_data->DiscoveryStatus);
+               ioc->sas_discovery_quiesce_io = discovery_status ? 1 : 0;
+               if (ioc->old_sas_discovery_protocal && !discovery_status)
+                       mptsas_queue_rescan(ioc);
+               return 0;
+       }
+       case MPI_EVENT_INTEGRATED_RAID:
        case MPI_EVENT_PERSISTENT_TABLE_FULL:
-               INIT_WORK(&ioc->sas_persist_task,
-                   mptsas_persist_clear_table);
-               schedule_work(&ioc->sas_persist_task);
-               break;
-        case MPI_EVENT_SAS_DISCOVERY:
-               mptsas_send_discovery_event(ioc,
-                       (EVENT_DATA_SAS_DISCOVERY *)reply->Data);
-               break;
        case MPI_EVENT_IR2:
-               mptsas_send_ir2_event(ioc,
-                   (PTR_MPI_EVENT_DATA_IR2)reply->Data);
+       case MPI_EVENT_SAS_PHY_LINK_STATUS:
+       case MPI_EVENT_QUEUE_FULL:
                break;
        default:
-               rc = mptscsih_event_process(ioc, reply);
-               break;
+               return 0;
        }
- out:
 
-       return rc;
+       event_data_sz = ((reply->MsgLength * 4) -
+           offsetof(EventNotificationReply_t, Data));
+       sz = offsetof(struct fw_event_work, event_data) + event_data_sz;
+       fw_event = kzalloc(sz, GFP_ATOMIC);
+       if (!fw_event) {
+               printk(MYIOC_s_WARN_FMT "%s: failed at (line=%d)\n", ioc->name,
+                __func__, __LINE__);
+               return 0;
+       }
+       memcpy(fw_event->event_data, reply->Data, event_data_sz);
+       fw_event->event = event;
+       fw_event->ioc = ioc;
+       mptsas_add_fw_event(ioc, fw_event, delay);
+       return 0;
+}
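
MsgLength counts 32-bit words, so the payload copied into the work item is MsgLength * 4 minus the fixed header before Data[], and the allocation uses offsetof() so the trailing event_data[] lands exactly at the end. A runnable sketch of the same sizing arithmetic, with trimmed stand-in structs (assumed layouts, not the MPI ones):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <stdint.h>

/* Trimmed stand-ins; the real layouts live in the MPI headers. */
struct reply_like {
        uint8_t MsgLength;      /* reply length in 32-bit words */
        uint8_t pad[7];         /* rest of the fixed header (assumed) */
        uint8_t Data[1];        /* variable-length event payload */
};

struct fw_event_like {
        uint32_t event;
        uint8_t event_data[1];  /* trailing copy of the payload */
};

int main(void)
{
        uint8_t raw[32] = { 8 };        /* MsgLength: 8 dwords = 32 bytes */
        struct reply_like *reply = (struct reply_like *)raw;

        /* Same arithmetic as mptsas_event_process(): payload size is
         * the whole reply minus the fixed header before Data[]. */
        int event_data_sz = reply->MsgLength * 4 -
            (int)offsetof(struct reply_like, Data);
        size_t sz = offsetof(struct fw_event_like, event_data) +
            event_data_sz;
        struct fw_event_like *fw_event = calloc(1, sz);

        if (!fw_event)
                return 1;
        memcpy(fw_event->event_data, reply->Data, event_data_sz);
        printf("payload %d bytes, work item %zu bytes\n",
            event_data_sz, sz);
        free(fw_event);
        return 0;
}
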
+
+/* Delete a volume when no longer listed in ioc pg2
+ */
+static void mptsas_volume_delete(MPT_ADAPTER *ioc, u8 id)
+{
+       struct scsi_device *sdev;
+       int i;
+
+       sdev = scsi_device_lookup(ioc->sh, MPTSAS_RAID_CHANNEL, id, 0);
+       if (!sdev)
+               return;
+       if (!ioc->raid_data.pIocPg2)
+               goto out;
+       if (!ioc->raid_data.pIocPg2->NumActiveVolumes)
+               goto out;
+       for (i = 0; i < ioc->raid_data.pIocPg2->NumActiveVolumes; i++)
+               if (ioc->raid_data.pIocPg2->RaidVolume[i].VolumeID == id)
+                       goto release_sdev;
+ out:
+       printk(MYIOC_s_INFO_FMT "removing raid volume, channel %d, "
+           "id %d\n", ioc->name, MPTSAS_RAID_CHANNEL, id);
+       scsi_remove_device(sdev);
+ release_sdev:
+       scsi_device_put(sdev);
 }
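
The goto pairing above is easy to misread: if the id is still listed in IOC page 2 the function jumps straight to release_sdev and only drops the reference, otherwise it falls through the removal printk and scsi_remove_device() into the same put. A standalone sketch of that decision, with illustrative data:

#include <stdio.h>

static int volume_still_active(const unsigned char *ids, int n,
        unsigned char id)
{
        int i;

        for (i = 0; i < n; i++)
                if (ids[i] == id)
                        return 1;
        return 0;
}

int main(void)
{
        unsigned char active[] = { 3, 7 };      /* assumed active volume IDs */
        unsigned char id = 5;

        if (volume_still_active(active, 2, id))
                printf("volume %u still listed: keep sdev, just put it\n",
                    (unsigned)id);
        else
                printf("volume %u gone: remove sdev, then put it\n",
                    (unsigned)id);
        return 0;
}
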
 
 static int
@@ -3128,6 +4701,7 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                return r;
 
        ioc = pci_get_drvdata(pdev);
+       mptsas_fw_event_off(ioc);
        ioc->DoneCtx = mptsasDoneCtx;
        ioc->TaskCtx = mptsasTaskCtx;
        ioc->InternalCtx = mptsasInternalCtx;
@@ -3211,17 +4785,15 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * A slightly different algorithm is required for
         * 64bit SGEs.
         */
-       scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
+       scale = ioc->req_sz/ioc->SGE_size;
+       if (ioc->sg_addr_size == sizeof(u64)) {
                numSGE = (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
-                 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-                 sizeof(u32));
+                 (ioc->req_sz - 60) / ioc->SGE_size;
        } else {
                numSGE = 1 + (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
-                 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-                 sizeof(u32));
+                 (ioc->req_sz - 64) / ioc->SGE_size;
        }
 
        if (numSGE < sh->sg_tablesize) {
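
The numSGE arithmetic above is easier to check with concrete numbers. A runnable sketch of the 64-bit branch with assumed values (req_sz = 128, a 12-byte SGE, MaxChainDepth = 16; illustrative only, not real IOC facts):

#include <stdio.h>

int main(void)
{
        /* Assumed example values, not taken from real IOC facts. */
        int req_sz = 128;               /* request frame size in bytes */
        int SGE_size = 12;              /* 64-bit simple SGE: 4 + 8 bytes */
        int MaxChainDepth = 16;

        int scale = req_sz / SGE_size;  /* SGEs per frame */
        int numSGE = (scale - 1) * (MaxChainDepth - 1) + scale +
            (req_sz - 60) / SGE_size;

        printf("scale=%d numSGE=%d\n", scale, numSGE);  /* 10, 150 */
        return 0;
}
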
@@ -3251,9 +4823,6 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        /* Clear the TM flags
         */
-       hd->tmPending = 0;
-       hd->tmState = TM_STATE_NONE;
-       hd->resetPending = 0;
        hd->abortSCpnt = NULL;
 
        /* Clear the pointer used to store
@@ -3273,10 +4842,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        ioc->sas_data.ptClear = mpt_pt_clear;
 
-       init_waitqueue_head(&hd->scandv_waitq);
-       hd->scandv_wait_done = 0;
        hd->last_queue_full = 0;
        INIT_LIST_HEAD(&hd->target_reset_list);
+       INIT_LIST_HEAD(&ioc->sas_device_info_list);
+       mutex_init(&ioc->sas_device_info_mutex);
+
        spin_unlock_irqrestore(&ioc->FreeQlock, flags);
 
        if (ioc->sas_data.ptClear==1) {
@@ -3291,8 +4861,11 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_mptsas_probe;
        }
 
+       /* older firmware doesn't support expander events */
+       if ((ioc->facts.HeaderVersion >> 8) < 0xE)
+               ioc->old_sas_discovery_protocal = 1;
        mptsas_scan_sas_topology(ioc);
-
+       mptsas_fw_event_on(ioc);
        return 0;
 
  out_mptsas_probe:
@@ -3301,12 +4874,25 @@ mptsas_probe(struct pci_dev *pdev, const struct pci_device_id *id)
        return error;
 }
 
+void
+mptsas_shutdown(struct pci_dev *pdev)
+{
+       MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+       mptsas_fw_event_off(ioc);
+       mptsas_cleanup_fw_event_q(ioc);
+}
+
 static void __devexit mptsas_remove(struct pci_dev *pdev)
 {
        MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
        struct mptsas_portinfo *p, *n;
        int i;
 
+       mptsas_shutdown(pdev);
+
+       mptsas_del_device_components(ioc);
+
        ioc->sas_discovery_ignore_events = 1;
        sas_remove_host(ioc->sh);
 
@@ -3315,11 +4901,12 @@ static void __devexit mptsas_remove(struct pci_dev *pdev)
                list_del(&p->list);
                for (i = 0 ; i < p->num_phys ; i++)
                        mptsas_port_delete(ioc, p->phy_info[i].port_details);
+
                kfree(p->phy_info);
                kfree(p);
        }
        mutex_unlock(&ioc->sas_topology_mutex);
-
+       ioc->hba_port_info = NULL;
        mptscsih_remove(pdev);
 }
 
@@ -3344,7 +4931,7 @@ static struct pci_driver mptsas_driver = {
        .id_table       = mptsas_pci_table,
        .probe          = mptsas_probe,
        .remove         = __devexit_p(mptsas_remove),
-       .shutdown       = mptscsih_shutdown,
+       .shutdown       = mptsas_shutdown,
 #ifdef CONFIG_PM
        .suspend        = mptscsih_suspend,
        .resume         = mptscsih_resume,
@@ -3364,10 +4951,12 @@ mptsas_init(void)
                return -ENODEV;
 
        mptsasDoneCtx = mpt_register(mptscsih_io_done, MPTSAS_DRIVER);
-       mptsasTaskCtx = mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
+       mptsasTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSAS_DRIVER);
        mptsasInternalCtx =
                mpt_register(mptscsih_scandv_complete, MPTSAS_DRIVER);
        mptsasMgmtCtx = mpt_register(mptsas_mgmt_done, MPTSAS_DRIVER);
+       mptsasDeviceResetCtx =
+               mpt_register(mptsas_taskmgmt_complete, MPTSAS_DRIVER);
 
        mpt_event_register(mptsasDoneCtx, mptsas_event_process);
        mpt_reset_register(mptsasDoneCtx, mptsas_ioc_reset);
@@ -3392,6 +4981,7 @@ mptsas_exit(void)
        mpt_deregister(mptsasInternalCtx);
        mpt_deregister(mptsasTaskCtx);
        mpt_deregister(mptsasDoneCtx);
+       mpt_deregister(mptsasDeviceResetCtx);
 }
 
 module_init(mptsas_init);
index 2b544e0877e6f0f3dd1ccc686edcfe2cd3e9a744..953c2bfcf6aaa470f05a0e3be18c966c05f3dc6b 100644 (file)
@@ -53,6 +53,7 @@ struct mptsas_target_reset_event {
        struct list_head        list;
        EVENT_DATA_SAS_DEVICE_STATUS_CHANGE sas_event_data;
        u8      target_reset_issued;
+       unsigned long    time_count;
 };
 
 enum mptsas_hotplug_action {
@@ -60,12 +61,37 @@ enum mptsas_hotplug_action {
        MPTSAS_DEL_DEVICE,
        MPTSAS_ADD_RAID,
        MPTSAS_DEL_RAID,
+       MPTSAS_ADD_PHYSDISK,
+       MPTSAS_ADD_PHYSDISK_REPROBE,
+       MPTSAS_DEL_PHYSDISK,
+       MPTSAS_DEL_PHYSDISK_REPROBE,
        MPTSAS_ADD_INACTIVE_VOLUME,
        MPTSAS_IGNORE_EVENT,
 };
 
+struct mptsas_mapping {
+       u8                      id;
+       u8                      channel;
+};
+
+struct mptsas_device_info {
+       struct list_head        list;
+       struct mptsas_mapping   os;     /* operating system mapping */
+       struct mptsas_mapping   fw;     /* firmware mapping */
+       u64                     sas_address;
+       u32                     device_info; /* specific bits for devices */
+       u16                     slot;           /* enclosure slot id */
+       u64                     enclosure_logical_id; /* enclosure address */
+       u8                      is_logical_volume; /* is this a logical volume */
+       /* set when this device belongs to a volume */
+       u8                      is_hidden_raid_component;
+       /* valid only when is_hidden_raid_component is set */
+       u8                      volume_id;
+       /* cached data for a removed device */
+       u8                      is_cached;
+};
+
 struct mptsas_hotplug_event {
-       struct work_struct      work;
        MPT_ADAPTER             *ioc;
        enum mptsas_hotplug_action event_type;
        u64                     sas_address;
@@ -73,11 +99,18 @@ struct mptsas_hotplug_event {
        u8                      id;
        u32                     device_info;
        u16                     handle;
-       u16                     parent_handle;
        u8                      phy_id;
-       u8                      phys_disk_num_valid;    /* hrc (hidden raid component) */
        u8                      phys_disk_num;          /* hrc - unique index*/
-       u8                      hidden_raid_component;  /* hrc - don't expose*/
+       struct scsi_device      *sdev;
+};
+
+struct fw_event_work {
+       struct list_head        list;
+       struct delayed_work      work;
+       MPT_ADAPTER     *ioc;
+       u32                     event;
+       u8                      retries;
+       u8                      event_data[1];
 };
 
 struct mptsas_discovery_event {
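
event_data[1] above is the pre-C99 idiom for a variable-length trailer, which is why mptsas_event_process() sizes the allocation with offsetof() rather than sizeof(). As a runnable aside, the C99 flexible-array form of the same idiom (illustrative type, not the driver's):

#include <stdio.h>
#include <stdlib.h>

/* C99 flexible array member: event_data[] contributes no size of
 * its own, so sizeof(*fw) covers only the fixed header. */
struct fw_event_like {
        unsigned int event;
        unsigned char retries;
        unsigned char event_data[];
};

int main(void)
{
        size_t payload = 24;    /* assumed example payload size */
        struct fw_event_like *fw = calloc(1, sizeof(*fw) + payload);

        if (!fw)
                return 1;
        printf("header=%zu total=%zu\n", sizeof(*fw),
            sizeof(*fw) + payload);
        free(fw);
        return 0;
}
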
index e62c6bc4ad33ec83407a002a1549a5edd49cdc90..024e8305bcf20b8842defd7decd3f423effc3c3c 100644 (file)
@@ -80,7 +80,7 @@ MODULE_VERSION(my_VERSION);
 /*
  *  Other private/forward protos...
  */
-static struct scsi_cmnd * mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+struct scsi_cmnd       *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
 static struct scsi_cmnd * mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i);
 static void    mptscsih_set_scsi_lookup(MPT_ADAPTER *ioc, int i, struct scsi_cmnd *scmd);
 static int     SCPNT_TO_LOOKUP_IDX(MPT_ADAPTER *ioc, struct scsi_cmnd *scmd);
@@ -92,18 +92,24 @@ static int  mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
                                 SCSIIORequest_t *pReq, int req_idx);
 static void    mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
 static void    mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
-static int     mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
-static int     mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
 
-static int     mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
+int    mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id,
+               int lun, int ctx2abort, ulong timeout);
 
 int            mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 int            mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
 
+void
+mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
+static int     mptscsih_get_completion_code(MPT_ADAPTER *ioc,
+               MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
 int            mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
 static int     mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
 static void    mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, VirtDevice *vdevice);
 
+static int
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+                               SCSITaskMgmtReply_t *pScsiTmReply);
 void           mptscsih_remove(struct pci_dev *);
 void           mptscsih_shutdown(struct pci_dev *);
 #ifdef CONFIG_PM
@@ -113,69 +119,6 @@ int                mptscsih_resume(struct pci_dev *pdev);
 
 #define SNS_LEN(scp)   SCSI_SENSE_BUFFERSIZE
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *     mptscsih_add_sge - Place a simple SGE at address pAddr.
- *     @pAddr: virtual address for SGE
- *     @flagslength: SGE flags and data transfer length
- *     @dma_addr: Physical address
- *
- *     This routine places a MPT request frame back on the MPT adapter's
- *     FreeQ.
- */
-static inline void
-mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
-{
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
-               SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
-               u32 tmp = dma_addr & 0xFFFFFFFF;
-
-               pSge->FlagsLength = cpu_to_le32(flagslength);
-               pSge->Address.Low = cpu_to_le32(tmp);
-               tmp = (u32) ((u64)dma_addr >> 32);
-               pSge->Address.High = cpu_to_le32(tmp);
-
-       } else {
-               SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
-               pSge->FlagsLength = cpu_to_le32(flagslength);
-               pSge->Address = cpu_to_le32(dma_addr);
-       }
-} /* mptscsih_add_sge() */
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *     mptscsih_add_chain - Place a chain SGE at address pAddr.
- *     @pAddr: virtual address for SGE
- *     @next: nextChainOffset value (u32's)
- *     @length: length of next SGL segment
- *     @dma_addr: Physical address
- *
- *     This routine places a MPT request frame back on the MPT adapter's
- *     FreeQ.
- */
-static inline void
-mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
-{
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
-               SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
-               u32 tmp = dma_addr & 0xFFFFFFFF;
-
-               pChain->Length = cpu_to_le16(length);
-               pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
-
-               pChain->NextChainOffset = next;
-
-               pChain->Address.Low = cpu_to_le32(tmp);
-               tmp = (u32) ((u64)dma_addr >> 32);
-               pChain->Address.High = cpu_to_le32(tmp);
-       } else {
-               SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
-               pChain->Length = cpu_to_le16(length);
-               pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
-               pChain->NextChainOffset = next;
-               pChain->Address = cpu_to_le32(dma_addr);
-       }
-} /* mptscsih_add_chain() */
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /*
@@ -281,10 +224,10 @@ mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
         */
 
 nextSGEset:
-       numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) );
+       numSgeSlots = ((frm_sz - sgeOffset) / ioc->SGE_size);
        numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
 
-       sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir;
+       sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | sgdir;
 
        /* Get first (num - 1) SG elements
         * Skip any SG entries with a length of 0
@@ -293,17 +236,19 @@ nextSGEset:
        for (ii=0; ii < (numSgeThisFrame-1); ii++) {
                thisxfer = sg_dma_len(sg);
                if (thisxfer == 0) {
-                       sg = sg_next(sg); /* Get next SG element from the OS */
+                       /* Get next SG element from the OS */
+                       sg = sg_next(sg);
                        sg_done++;
                        continue;
                }
 
                v2 = sg_dma_address(sg);
-               mptscsih_add_sge(psge, sgflags | thisxfer, v2);
+               ioc->add_sge(psge, sgflags | thisxfer, v2);
 
-               sg = sg_next(sg);       /* Get next SG element from the OS */
-               psge += (sizeof(u32) + sizeof(dma_addr_t));
-               sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+               /* Get next SG element from the OS */
+               sg = sg_next(sg);
+               psge += ioc->SGE_size;
+               sgeOffset += ioc->SGE_size;
                sg_done++;
        }
 
@@ -320,12 +265,8 @@ nextSGEset:
                thisxfer = sg_dma_len(sg);
 
                v2 = sg_dma_address(sg);
-               mptscsih_add_sge(psge, sgflags | thisxfer, v2);
-               /*
-               sg = sg_next(sg);
-               psge += (sizeof(u32) + sizeof(dma_addr_t));
-               */
-               sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+               ioc->add_sge(psge, sgflags | thisxfer, v2);
+               sgeOffset += ioc->SGE_size;
                sg_done++;
 
                if (chainSge) {
@@ -334,7 +275,8 @@ nextSGEset:
                         * Update the chain element
                         * Offset and Length fields.
                         */
-                       mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+                       ioc->add_chain((char *)chainSge, 0, sgeOffset,
+                               ioc->ChainBufferDMA + chain_dma_off);
                } else {
                        /* The current buffer is the original MF
                         * and there is no Chain buffer.
@@ -367,7 +309,7 @@ nextSGEset:
                 * set properly).
                 */
                if (sg_done) {
-                       u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t)));
+                       u32 *ptmp = (u32 *) (psge - ioc->SGE_size);
                        sgflags = le32_to_cpu(*ptmp);
                        sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
                        *ptmp = cpu_to_le32(sgflags);
@@ -381,8 +323,9 @@ nextSGEset:
                         * Old chain element is now complete.
                         */
                        u8 nextChain = (u8) (sgeOffset >> 2);
-                       sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
-                       mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+                       sgeOffset += ioc->SGE_size;
+                       ioc->add_chain((char *)chainSge, nextChain, sgeOffset,
+                                        ioc->ChainBufferDMA + chain_dma_off);
                } else {
                        /* The original MF buffer requires a chain buffer -
                         * set the offset.
@@ -592,14 +535,15 @@ mptscsih_info_scsiio(MPT_ADAPTER *ioc, struct scsi_cmnd *sc, SCSIIOReply_t * pSc
        }
 
        scsi_print_command(sc);
-       printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d\n",
-           ioc->name, pScsiReply->Bus, pScsiReply->TargetID);
+       printk(MYIOC_s_DEBUG_FMT "\tfw_channel = %d, fw_id = %d, lun = %d\n",
+           ioc->name, pScsiReply->Bus, pScsiReply->TargetID, sc->device->lun);
        printk(MYIOC_s_DEBUG_FMT "\trequest_len = %d, underflow = %d, "
            "resid = %d\n", ioc->name, scsi_bufflen(sc), sc->underflow,
            scsi_get_resid(sc));
        printk(MYIOC_s_DEBUG_FMT "\ttag = %d, transfer_count = %d, "
            "sc->result = %08X\n", ioc->name, le16_to_cpu(pScsiReply->TaskTag),
            le32_to_cpu(pScsiReply->TransferCount), sc->result);
+
        printk(MYIOC_s_DEBUG_FMT "\tiocstatus = %s (0x%04x), "
            "scsi_status = %s (0x%02x), scsi_state = (0x%02x)\n",
            ioc->name, desc, ioc_status, desc1, pScsiReply->SCSIStatus,
@@ -654,16 +598,14 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
        req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
        req_idx_MR = (mr != NULL) ?
            le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx) : req_idx;
+
+       /* Special case: the firmware returned a message frame that was
+        * already freed.  This can happen while the IOC is resetting;
+        * return immediately and ignore the frame.
+        */
        if ((req_idx != req_idx_MR) ||
-           (mf->u.frame.linkage.arg1 == 0xdeadbeaf)) {
-               printk(MYIOC_s_ERR_FMT "Received a mf that was already freed\n",
-                   ioc->name);
-               printk (MYIOC_s_ERR_FMT
-                   "req_idx=%x req_idx_MR=%x mf=%p mr=%p sc=%p\n",
-                   ioc->name, req_idx, req_idx_MR, mf, mr,
-                   mptscsih_get_scsi_lookup(ioc, req_idx_MR));
+           (le32_to_cpu(mf->u.frame.linkage.arg1) == 0xdeadbeaf))
                return 0;
-       }
 
        sc = mptscsih_getclear_scsi_lookup(ioc, req_idx);
        if (sc == NULL) {
@@ -810,12 +752,16 @@ mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
                         */
 
                case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:        /* 0x0048 */
-               case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:         /* 0x004C */
                        /* Linux handles an unsolicited DID_RESET better
                         * than an unsolicited DID_ABORT.
                         */
                        sc->result = DID_RESET << 16;
 
+               case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:         /* 0x004C */
+                       if (ioc->bus_type == FC)
+                               sc->result = DID_ERROR << 16;
+                       else
+                               sc->result = DID_RESET << 16;
                        break;
 
                case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:      /* 0x0049 */
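A note on the DID_* values assigned above: the midlayer keeps the host byte in bits 16..23 of scmd->result, hence the << 16. A hedged one-liner showing the inverse mapping (host_byte_sketch is an invented name; the kernel's own accessor is host_byte()):

    /* Sketch: recover the host byte (DID_RESET, DID_ERROR, ...) */
    static inline int host_byte_sketch(int result)
    {
            return (result >> 16) & 0xff;
    }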
@@ -992,9 +938,9 @@ mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
                scsi_dma_unmap(sc);
                sc->result = DID_RESET << 16;
                sc->host_scribble = NULL;
-               sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
-                   "completing cmds: fw_channel %d, fw_id %d, sc=%p,"
-                   " mf = %p, idx=%x\n", ioc->name, channel, id, sc, mf, ii);
+               dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT
+                   "completing cmds: fw_channel %d, fw_id %d, sc=%p, mf = %p, "
+                   "idx=%x\n", ioc->name, channel, id, sc, mf, ii));
                sc->scsi_done(sc);
        }
 }
@@ -1053,9 +999,11 @@ mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, VirtDevice *vdevice)
                        scsi_dma_unmap(sc);
                        sc->host_scribble = NULL;
                        sc->result = DID_NO_CONNECT << 16;
-                       sdev_printk(KERN_INFO, sc->device, MYIOC_s_FMT "completing cmds: fw_channel %d,"
-                          "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name, vdevice->vtarget->channel,
-                          vdevice->vtarget->id, sc, mf, ii);
+                       dtmprintk(ioc, sdev_printk(KERN_INFO, sc->device,
+                          MYIOC_s_FMT "completing cmds: fw_channel %d, "
+                          "fw_id %d, sc=%p, mf = %p, idx=%x\n", ioc->name,
+                          vdevice->vtarget->channel, vdevice->vtarget->id,
+                          sc, mf, ii));
                        sc->scsi_done(sc);
                        spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
                }
@@ -1346,7 +1294,6 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
        MPT_FRAME_HDR           *mf;
        SCSIIORequest_t         *pScsiReq;
        VirtDevice              *vdevice = SCpnt->device->hostdata;
-       int      lun;
        u32      datalen;
        u32      scsictl;
        u32      scsidir;
@@ -1357,13 +1304,12 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
 
        hd = shost_priv(SCpnt->device->host);
        ioc = hd->ioc;
-       lun = SCpnt->device->lun;
        SCpnt->scsi_done = done;
 
        dmfprintk(ioc, printk(MYIOC_s_DEBUG_FMT "qcmd: SCpnt=%p, done()=%p\n",
                ioc->name, SCpnt, done));
 
-       if (hd->resetPending) {
+       if (ioc->taskmgmt_quiesce_io) {
                dtmprintk(ioc, printk(MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
                        ioc->name, SCpnt));
                return SCSI_MLQUEUE_HOST_BUSY;
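On the rewritten quiesce check above: while ioc->taskmgmt_quiesce_io is set, queuecommand pushes back with SCSI_MLQUEUE_HOST_BUSY, which tells the midlayer to requeue the command and retry later rather than fail it. A minimal sketch of the pattern (qcmd_quiesce_sketch is an invented name):

    /* Sketch: refuse new I/O while task management quiesces the IOC. */
    static int qcmd_quiesce_sketch(MPT_ADAPTER *ioc)
    {
            if (ioc->taskmgmt_quiesce_io)
                    return SCSI_MLQUEUE_HOST_BUSY;  /* midlayer requeues */
            return 0;                               /* ok to queue */
    }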
@@ -1422,7 +1368,7 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
        pScsiReq->CDBLength = SCpnt->cmd_len;
        pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
        pScsiReq->Reserved = 0;
-       pScsiReq->MsgFlags = mpt_msg_flags();
+       pScsiReq->MsgFlags = mpt_msg_flags(ioc);
        int_to_scsilun(SCpnt->device->lun, (struct scsi_lun *)pScsiReq->LUN);
        pScsiReq->Control = cpu_to_le32(scsictl);
 
@@ -1448,7 +1394,8 @@ mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
         */
        if (datalen == 0) {
                /* Add a NULL SGE */
-               mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0,
+               ioc->add_sge((char *)&pScsiReq->SGL,
+                       MPT_SGE_FLAGS_SSIMPLE_READ | 0,
                        (dma_addr_t) -1);
        } else {
                /* Add a 32 or 64 bit SGE */
@@ -1528,8 +1475,8 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
- *     mptscsih_TMHandler - Generic handler for SCSI Task Management.
- *     @hd: Pointer to MPT SCSI HOST structure
+ *     mptscsih_IssueTaskMgmt - Generic send Task Management function.
+ *     @hd: Pointer to MPT_SCSI_HOST structure
  *     @type: Task Management type
  *     @channel: channel number for task management
  *     @id: Logical Target ID for reset (if appropriate)
@@ -1537,145 +1484,68 @@ mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
  *     @ctx2abort: Context for the task to be aborted (if appropriate)
  *     @timeout: timeout for task management control
  *
- *     Fall through to mpt_HardResetHandler if: not operational, too many
- *     failed TM requests or handshake failure.
+ *     Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ *     or a non-interrupt thread.  In the former, must not call schedule().
  *
- *     Remark: Currently invoked from a non-interrupt thread (_bh).
+ *     Not all fields are meaningful for all task types.
  *
- *     Note: With old EH code, at most 1 SCSI TaskMgmt function per IOC
- *     will be active.
+ *     Returns 0 for SUCCESS, or FAILED.
  *
- *     Returns 0 for SUCCESS, or %FAILED.
  **/
 int
-mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun,
+       int ctx2abort, ulong timeout)
 {
-       MPT_ADAPTER     *ioc;
-       int              rc = -1;
+       MPT_FRAME_HDR   *mf;
+       SCSITaskMgmt_t  *pScsiTm;
+       int              ii;
+       int              retval;
+       MPT_ADAPTER     *ioc = hd->ioc;
+       unsigned long    timeleft;
+       u8               issue_hard_reset;
        u32              ioc_raw_state;
-       unsigned long    flags;
-
-       ioc = hd->ioc;
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler Entered!\n", ioc->name));
-
-       // SJR - CHECKME - Can we avoid this here?
-       // (mpt_HardResetHandler has this check...)
-       spin_lock_irqsave(&ioc->diagLock, flags);
-       if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
-               spin_unlock_irqrestore(&ioc->diagLock, flags);
-               return FAILED;
-       }
-       spin_unlock_irqrestore(&ioc->diagLock, flags);
-
-       /*  Wait a fixed amount of time for the TM pending flag to be cleared.
-        *  If we time out and not bus reset, then we return a FAILED status
-        *  to the caller.
-        *  The call to mptscsih_tm_pending_wait() will set the pending flag
-        *  if we are
-        *  successful. Otherwise, reload the FW.
-        */
-       if (mptscsih_tm_pending_wait(hd) == FAILED) {
-               if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
-                       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler abort: "
-                          "Timed out waiting for last TM (%d) to complete! \n",
-                          ioc->name, hd->tmPending));
-                       return FAILED;
-               } else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
-                       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler target "
-                               "reset: Timed out waiting for last TM (%d) "
-                               "to complete! \n", ioc->name,
-                               hd->tmPending));
-                       return FAILED;
-               } else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
-                       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TMHandler bus reset: "
-                          "Timed out waiting for last TM (%d) to complete! \n",
-                         ioc->name, hd->tmPending));
-                       return FAILED;
-               }
-       } else {
-               spin_lock_irqsave(&ioc->FreeQlock, flags);
-               hd->tmPending |=  (1 << type);
-               spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-       }
+       unsigned long    time_count;
 
+       issue_hard_reset = 0;
        ioc_raw_state = mpt_GetIocState(ioc, 0);
 
        if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
                printk(MYIOC_s_WARN_FMT
-                       "TM Handler for type=%x: IOC Not operational (0x%x)!\n",
+                       "TaskMgmt type=%x: IOC Not operational (0x%x)!\n",
                        ioc->name, type, ioc_raw_state);
-               printk(MYIOC_s_WARN_FMT " Issuing HardReset!!\n", ioc->name);
+               printk(MYIOC_s_WARN_FMT "Issuing HardReset from %s!!\n",
+                   ioc->name, __func__);
                if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0)
-                       printk(MYIOC_s_WARN_FMT "TMHandler: HardReset "
+                       printk(MYIOC_s_WARN_FMT "TaskMgmt HardReset "
                            "FAILED!!\n", ioc->name);
-               return FAILED;
+               return 0;
        }
 
        if (ioc_raw_state & MPI_DOORBELL_ACTIVE) {
                printk(MYIOC_s_WARN_FMT
-                       "TM Handler for type=%x: ioc_state: "
+                       "TaskMgmt type=%x: ioc_state: "
                        "DOORBELL_ACTIVE (0x%x)!\n",
                        ioc->name, type, ioc_raw_state);
                return FAILED;
        }
 
-       /* Isse the Task Mgmt request.
-        */
-       if (hd->hard_resets < -1)
-               hd->hard_resets++;
-
-       rc = mptscsih_IssueTaskMgmt(hd, type, channel, id, lun,
-           ctx2abort, timeout);
-       if (rc)
-               printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n",
-                      ioc->name);
-       else
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Issue of TaskMgmt Successful!\n",
-                          ioc->name));
-
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-                       "TMHandler rc = %d!\n", ioc->name, rc));
-
-       return rc;
-}
-
-
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *     mptscsih_IssueTaskMgmt - Generic send Task Management function.
- *     @hd: Pointer to MPT_SCSI_HOST structure
- *     @type: Task Management type
- *     @channel: channel number for task management
- *     @id: Logical Target ID for reset (if appropriate)
- *     @lun: Logical Unit for reset (if appropriate)
- *     @ctx2abort: Context for the task to be aborted (if appropriate)
- *     @timeout: timeout for task management control
- *
- *     Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
- *     or a non-interrupt thread.  In the former, must not call schedule().
- *
- *     Not all fields are meaningfull for all task types.
- *
- *     Returns 0 for SUCCESS, or FAILED.
- *
- **/
-static int
-mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout)
-{
-       MPT_FRAME_HDR   *mf;
-       SCSITaskMgmt_t  *pScsiTm;
-       int              ii;
-       int              retval;
-       MPT_ADAPTER     *ioc = hd->ioc;
+       mutex_lock(&ioc->taskmgmt_cmds.mutex);
+       if (mpt_set_taskmgmt_in_progress_flag(ioc) != 0) {
+               mf = NULL;
+               retval = FAILED;
+               goto out;
+       }
 
        /* Return Fail to calling function if no message frames available.
         */
        if ((mf = mpt_get_msg_frame(ioc->TaskCtx, ioc)) == NULL) {
-               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
-                               ioc->name));
-               return FAILED;
+               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                       "TaskMgmt no msg frames!!\n", ioc->name));
+               retval = FAILED;
+               mpt_clear_taskmgmt_in_progress_flag(ioc);
+               goto out;
        }
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt request @ %p\n",
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt request (mf=%p)\n",
                        ioc->name, mf));
 
        /* Format the Request
@@ -1699,11 +1569,14 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
 
        pScsiTm->TaskMsgContext = ctx2abort;
 
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "IssueTaskMgmt: ctx2abort (0x%08x) "
-               "type=%d\n", ioc->name, ctx2abort, type));
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt: ctx2abort (0x%08x) "
+               "task_type = 0x%02X, timeout = %ld\n", ioc->name, ctx2abort,
+               type, timeout));
 
        DBG_DUMP_TM_REQUEST_FRAME(ioc, (u32 *)pScsiTm);
 
+       INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+       time_count = jiffies;
        if ((ioc->facts.IOCCapabilities & MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q) &&
            (ioc->facts.MsgVersion >= MPI_VERSION_01_05))
                mpt_put_msg_frame_hi_pri(ioc->TaskCtx, ioc, mf);
@@ -1711,47 +1584,50 @@ mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, i
                retval = mpt_send_handshake_request(ioc->TaskCtx, ioc,
                        sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP);
                if (retval) {
-                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "send_handshake FAILED!"
-                       " (hd %p, ioc %p, mf %p, rc=%d) \n", ioc->name, hd,
-                       ioc, mf, retval));
-                       goto fail_out;
+                       dfailprintk(ioc, printk(MYIOC_s_ERR_FMT
+                               "TaskMgmt handshake FAILED!(mf=%p, rc=%d) \n",
+                               ioc->name, mf, retval));
+                       mpt_free_msg_frame(ioc, mf);
+                       mpt_clear_taskmgmt_in_progress_flag(ioc);
+                       goto out;
                }
        }
 
-       if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) {
-               dfailprintk(ioc, printk(MYIOC_s_ERR_FMT "task management request TIMED OUT!"
-                       " (hd %p, ioc %p, mf %p) \n", ioc->name, hd,
-                       ioc, mf));
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Calling HardReset! \n",
-                        ioc->name));
-               retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rc=%d \n",
-                        ioc->name, retval));
-               goto fail_out;
+       timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
+               timeout*HZ);
+       if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               retval = FAILED;
+               dtmprintk(ioc, printk(MYIOC_s_ERR_FMT
+                   "TaskMgmt TIMED OUT!(mf=%p)\n", ioc->name, mf));
+               mpt_clear_taskmgmt_in_progress_flag(ioc);
+               if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+                       goto out;
+               issue_hard_reset = 1;
+               goto out;
        }
 
-       /*
-        * Handle success case, see if theres a non-zero ioc_status.
-        */
-       if (hd->tm_iocstatus == MPI_IOCSTATUS_SUCCESS ||
-          hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
-          hd->tm_iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
-               retval = 0;
-       else
-               retval = FAILED;
+       retval = mptscsih_taskmgmt_reply(ioc, type,
+           (SCSITaskMgmtReply_t *) ioc->taskmgmt_cmds.reply);
 
-       return retval;
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "TaskMgmt completed (%d seconds)\n",
+           ioc->name, jiffies_to_msecs(jiffies - time_count)/1000));
 
-fail_out:
+ out:
 
-       /*
-        * Free task management mf, and corresponding tm flags
-        */
-       mpt_free_msg_frame(ioc, mf);
-       hd->tmPending = 0;
-       hd->tmState = TM_STATE_NONE;
-       return FAILED;
+       CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
+       if (issue_hard_reset) {
+               printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+                       ioc->name, __func__);
+               retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+               mpt_free_msg_frame(ioc, mf);
+       }
+
+       retval = (retval == 0) ? 0 : FAILED;
+       mutex_unlock(&ioc->taskmgmt_cmds.mutex);
+       return retval;
 }
+EXPORT_SYMBOL(mptscsih_IssueTaskMgmt);
 
 static int
 mptscsih_get_tm_timeout(MPT_ADAPTER *ioc)
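The rewritten mptscsih_IssueTaskMgmt() above drops the old tmPending/tmState polling in favor of the generic mutex + struct completion pattern: mark the request pending, post the frame, sleep in wait_for_completion_timeout(), and let mptscsih_taskmgmt_complete() signal ioc->taskmgmt_cmds.done. A reduced sketch of that skeleton (names are the patch's own; error paths trimmed):

    mutex_lock(&ioc->taskmgmt_cmds.mutex);
    INITIALIZE_MGMT_STATUS(ioc->taskmgmt_cmds.status)   /* marks PENDING */
    /* ... build and post the SCSITaskMgmt_t frame ... */
    timeleft = wait_for_completion_timeout(&ioc->taskmgmt_cmds.done,
                                           timeout * HZ);
    if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD))
            issue_hard_reset = 1;   /* timed out; escalate */
    CLEAR_MGMT_STATUS(ioc->taskmgmt_cmds.status)
    mutex_unlock(&ioc->taskmgmt_cmds.mutex);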
@@ -1838,13 +1714,8 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
                goto out;
        }
 
-       if (hd->resetPending) {
-               retval = FAILED;
-               goto out;
-       }
-
-       if (hd->timeouts < -1)
-               hd->timeouts++;
+       if (ioc->timeouts < -1)
+               ioc->timeouts++;
 
        if (mpt_fwfault_debug)
                mpt_halt_firmware(ioc);
@@ -1861,22 +1732,30 @@ mptscsih_abort(struct scsi_cmnd * SCpnt)
 
        hd->abortSCpnt = SCpnt;
 
-       retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
-           vdevice->vtarget->channel, vdevice->vtarget->id, vdevice->lun,
-           ctx2abort, mptscsih_get_tm_timeout(ioc));
+       retval = mptscsih_IssueTaskMgmt(hd,
+                        MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+                        vdevice->vtarget->channel,
+                        vdevice->vtarget->id, vdevice->lun,
+                        ctx2abort, mptscsih_get_tm_timeout(ioc));
 
        if (SCPNT_TO_LOOKUP_IDX(ioc, SCpnt) == scpnt_idx &&
-           SCpnt->serial_number == sn)
+           SCpnt->serial_number == sn) {
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "task abort: command still in active list! (sc=%p)\n",
+                   ioc->name, SCpnt));
                retval = FAILED;
+       } else {
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "task abort: command cleared from active list! (sc=%p)\n",
+                   ioc->name, SCpnt));
+               retval = SUCCESS;
+       }
 
  out:
        printk(MYIOC_s_INFO_FMT "task abort: %s (sc=%p)\n",
-           ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
+           ioc->name, ((retval == SUCCESS) ? "SUCCESS" : "FAILED"), SCpnt);
 
-       if (retval == 0)
-               return SUCCESS;
-       else
-               return FAILED;
+       return retval;
 }
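Note the new success criterion in mptscsih_abort() above: the TM reply alone is not trusted; the command counts as aborted only if its ScsiLookup[] slot was cleared by the completion path (with serial_number confirming the slot was not reused). Hedged sketch of that check (abort_verified_sketch is an invented name; SCPNT_TO_LOOKUP_IDX() is the driver's own lookup):

    /* Sketch: gone from the lookup table => the abort really completed. */
    static int abort_verified_sketch(MPT_ADAPTER *ioc, struct scsi_cmnd *sc,
                                     int scpnt_idx, unsigned long sn)
    {
            return !(SCPNT_TO_LOOKUP_IDX(ioc, sc) == scpnt_idx &&
                     sc->serial_number == sn);
    }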
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -1909,14 +1788,9 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
               ioc->name, SCpnt);
        scsi_print_command(SCpnt);
 
-       if (hd->resetPending) {
-               retval = FAILED;
-               goto out;
-       }
-
        vdevice = SCpnt->device->hostdata;
        if (!vdevice || !vdevice->vtarget) {
-               retval = 0;
+               retval = SUCCESS;
                goto out;
        }
 
@@ -1927,9 +1801,11 @@ mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
                goto out;
        }
 
-       retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
-           vdevice->vtarget->channel, vdevice->vtarget->id, 0, 0,
-           mptscsih_get_tm_timeout(ioc));
+       retval = mptscsih_IssueTaskMgmt(hd,
+                               MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+                               vdevice->vtarget->channel,
+                               vdevice->vtarget->id, 0, 0,
+                               mptscsih_get_tm_timeout(ioc));
 
  out:
        printk (MYIOC_s_INFO_FMT "target reset: %s (sc=%p)\n",
@@ -1972,12 +1848,16 @@ mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
               ioc->name, SCpnt);
        scsi_print_command(SCpnt);
 
-       if (hd->timeouts < -1)
-               hd->timeouts++;
+       if (ioc->timeouts < -1)
+               ioc->timeouts++;
 
        vdevice = SCpnt->device->hostdata;
-       retval = mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
-           vdevice->vtarget->channel, 0, 0, 0, mptscsih_get_tm_timeout(ioc));
+       if (!vdevice || !vdevice->vtarget)
+               return SUCCESS;
+       retval = mptscsih_IssueTaskMgmt(hd,
+                                       MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+                                       vdevice->vtarget->channel, 0, 0, 0,
+                                       mptscsih_get_tm_timeout(ioc));
 
        printk(MYIOC_s_INFO_FMT "bus reset: %s (sc=%p)\n",
            ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
@@ -2001,8 +1881,9 @@ int
 mptscsih_host_reset(struct scsi_cmnd *SCpnt)
 {
        MPT_SCSI_HOST *  hd;
-       int              retval;
+       int              status = SUCCESS;
        MPT_ADAPTER     *ioc;
+       int             retval;
 
        /*  If we can't locate the host to reset, then we failed. */
        if ((hd = shost_priv(SCpnt->device->host)) == NULL){
@@ -2021,86 +1902,71 @@ mptscsih_host_reset(struct scsi_cmnd *SCpnt)
        /*  If our attempts to reset the host failed, then return a failed
         *  status.  The host will be taken off line by the SCSI mid-layer.
         */
-       if (mpt_HardResetHandler(ioc, CAN_SLEEP) < 0) {
-               retval = FAILED;
-       } else {
-               /*  Make sure TM pending is cleared and TM state is set to
-                *  NONE.
-                */
-               retval = 0;
-               hd->tmPending = 0;
-               hd->tmState = TM_STATE_NONE;
-       }
+       retval = mpt_HardResetHandler(ioc, CAN_SLEEP);
+       if (retval < 0)
+               status = FAILED;
+       else
+               status = SUCCESS;
 
        printk(MYIOC_s_INFO_FMT "host reset: %s (sc=%p)\n",
            ioc->name, ((retval == 0) ? "SUCCESS" : "FAILED" ), SCpnt);
 
-       return retval;
+       return status;
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *     mptscsih_tm_pending_wait - wait for pending task management request to complete
- *     @hd: Pointer to MPT host structure.
- *
- *     Returns {SUCCESS,FAILED}.
- */
 static int
-mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
+mptscsih_taskmgmt_reply(MPT_ADAPTER *ioc, u8 type,
+       SCSITaskMgmtReply_t *pScsiTmReply)
 {
-       unsigned long  flags;
-       int            loop_count = 4 * 10;  /* Wait 10 seconds */
-       int            status = FAILED;
-       MPT_ADAPTER     *ioc = hd->ioc;
+       u16                      iocstatus;
+       u32                      termination_count;
+       int                      retval;
 
-       do {
-               spin_lock_irqsave(&ioc->FreeQlock, flags);
-               if (hd->tmState == TM_STATE_NONE) {
-                       hd->tmState = TM_STATE_IN_PROGRESS;
-                       hd->tmPending = 1;
-                       spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-                       status = SUCCESS;
-                       break;
-               }
-               spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-               msleep(250);
-       } while (--loop_count);
+       if (!(ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_RF_VALID)) {
+               retval = FAILED;
+               goto out;
+       }
 
-       return status;
-}
+       DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/**
- *     mptscsih_tm_wait_for_completion - wait for completion of TM task
- *     @hd: Pointer to MPT host structure.
- *     @timeout: timeout value
- *
- *     Returns {SUCCESS,FAILED}.
- */
-static int
-mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
-{
-       unsigned long  flags;
-       int            loop_count = 4 * timeout;
-       int            status = FAILED;
-       MPT_ADAPTER     *ioc = hd->ioc;
+       iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+       termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
 
-       do {
-               spin_lock_irqsave(&ioc->FreeQlock, flags);
-               if(hd->tmPending == 0) {
-                       status = SUCCESS;
-                       spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-                       break;
-               }
-               spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-               msleep(250);
-       } while (--loop_count);
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "TaskMgmt fw_channel = %d, fw_id = %d, task_type = 0x%02X,\n"
+           "\tiocstatus = 0x%04X, loginfo = 0x%08X, response_code = 0x%02X,\n"
+           "\tterm_cmnds = %d\n", ioc->name, pScsiTmReply->Bus,
+           pScsiTmReply->TargetID, type, le16_to_cpu(pScsiTmReply->IOCStatus),
+           le32_to_cpu(pScsiTmReply->IOCLogInfo), pScsiTmReply->ResponseCode,
+           termination_count));
 
-       return status;
+       if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
+           pScsiTmReply->ResponseCode)
+               mptscsih_taskmgmt_response_code(ioc,
+                   pScsiTmReply->ResponseCode);
+
+       if (iocstatus == MPI_IOCSTATUS_SUCCESS) {
+               retval = 0;
+               goto out;
+       }
+
+       retval = FAILED;
+       if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+               if (termination_count == 1)
+                       retval = 0;
+               goto out;
+       }
+
+       if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_TERMINATED ||
+          iocstatus == MPI_IOCSTATUS_SCSI_IOC_TERMINATED)
+               retval = 0;
+
+ out:
+       return retval;
 }
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-static void
+void
 mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
 {
        char *desc;
@@ -2134,6 +2000,7 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
        printk(MYIOC_s_INFO_FMT "Response Code(0x%08x): F/W: %s\n",
                ioc->name, response_code, desc);
 }
+EXPORT_SYMBOL(mptscsih_taskmgmt_response_code);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
@@ -2150,97 +2017,28 @@ mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code)
  *     Returns 1 indicating alloc'd request frame ptr should be freed.
  **/
 int
-mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
+       MPT_FRAME_HDR *mr)
 {
-       SCSITaskMgmtReply_t     *pScsiTmReply;
-       SCSITaskMgmt_t          *pScsiTmReq;
-       MPT_SCSI_HOST           *hd;
-       unsigned long            flags;
-       u16                      iocstatus;
-       u8                       tmType;
-       u32                      termination_count;
-
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
-           ioc->name, mf, mr));
-       if (!ioc->sh) {
-               dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
-                   "TaskMgmt Complete: NULL Scsi Host Ptr\n", ioc->name));
-               return 1;
-       }
-
-       if (mr == NULL) {
-               dtmprintk(ioc, printk(MYIOC_s_WARN_FMT
-                   "ERROR! TaskMgmt Reply: NULL Request %p\n", ioc->name, mf));
-               return 1;
-       }
-
-       hd = shost_priv(ioc->sh);
-       pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
-       pScsiTmReq = (SCSITaskMgmt_t*)mf;
-       tmType = pScsiTmReq->TaskType;
-       iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-       termination_count = le32_to_cpu(pScsiTmReply->TerminationCount);
+       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+               "TaskMgmt completed (mf=%p, mr=%p)\n", ioc->name, mf, mr));
 
-       if (ioc->facts.MsgVersion >= MPI_VERSION_01_05 &&
-           pScsiTmReply->ResponseCode)
-               mptscsih_taskmgmt_response_code(ioc,
-                   pScsiTmReply->ResponseCode);
-       DBG_DUMP_TM_REPLY_FRAME(ioc, (u32 *)pScsiTmReply);
+       ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
 
-#ifdef CONFIG_FUSION_LOGGING
-       if ((ioc->debug_level & MPT_DEBUG_REPLY) ||
-                               (ioc->debug_level & MPT_DEBUG_TM ))
-               printk("%s: ha=%d [%d:%d:0] task_type=0x%02X "
-                       "iocstatus=0x%04X\n\tloginfo=0x%08X response_code=0x%02X "
-                       "term_cmnds=%d\n", __func__, ioc->id, pScsiTmReply->Bus,
-                        pScsiTmReply->TargetID, pScsiTmReq->TaskType,
-                       le16_to_cpu(pScsiTmReply->IOCStatus),
-                       le32_to_cpu(pScsiTmReply->IOCLogInfo),pScsiTmReply->ResponseCode,
-                       le32_to_cpu(pScsiTmReply->TerminationCount));
-#endif
-       if (!iocstatus) {
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT " TaskMgmt SUCCESS\n", ioc->name));
-                       hd->abortSCpnt = NULL;
+       if (!mr)
                goto out;
-       }
-
-       /* Error?  (anything non-zero?) */
-
-       /* clear flags and continue.
-        */
-       switch (tmType) {
-
-       case MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK:
-               if (termination_count == 1)
-                       iocstatus = MPI_IOCSTATUS_SCSI_TASK_TERMINATED;
-               hd->abortSCpnt = NULL;
-               break;
-
-       case MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS:
-
-               /* If an internal command is present
-                * or the TM failed - reload the FW.
-                * FC FW may respond FAILED to an ABORT
-                */
-               if (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED ||
-                   hd->cmdPtr)
-                       if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
-                               printk(MYIOC_s_WARN_FMT " Firmware Reload FAILED!!\n", ioc->name);
-               break;
-
-       case MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET:
-       default:
-               break;
-       }
 
+       ioc->taskmgmt_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+       memcpy(ioc->taskmgmt_cmds.reply, mr,
+           min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength));
  out:
-       spin_lock_irqsave(&ioc->FreeQlock, flags);
-       hd->tmPending = 0;
-       hd->tmState = TM_STATE_NONE;
-       hd->tm_iocstatus = iocstatus;
-       spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-
-       return 1;
+       if (ioc->taskmgmt_cmds.status & MPT_MGMT_STATUS_PENDING) {
+               mpt_clear_taskmgmt_in_progress_flag(ioc);
+               ioc->taskmgmt_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+               complete(&ioc->taskmgmt_cmds.done);
+               return 1;
+       }
+       return 0;
 }
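About the reply copy in mptscsih_taskmgmt_complete() above: MsgLength in the MPT reply header counts 32-bit words, so the byte count is 4 * MsgLength, clamped so a large reply can never overrun the cached frame. Sketch (reply_bytes_sketch is an invented name):

    /* Sketch: bound the number of reply bytes to cache. */
    static int reply_bytes_sketch(MPT_FRAME_HDR *mr)
    {
            /* MsgLength is in dwords; cap at one default frame */
            return min(MPT_DEFAULT_FRAME_SIZE, 4 * mr->u.reply.MsgLength);
    }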
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -2290,8 +2088,10 @@ int
 mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
 {
        struct inactive_raid_component_info *component_info;
-       int i;
+       int i, j;
+       RaidPhysDiskPage1_t *phys_disk;
        int rc = 0;
+       int num_paths;
 
        if (!ioc->raid_data.pIocPg3)
                goto out;
@@ -2303,6 +2103,45 @@ mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id)
                }
        }
 
+       if (ioc->bus_type != SAS)
+               goto out;
+
+       /*
+        * Check if dual path
+        */
+       for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+               num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+                   ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+               if (num_paths < 2)
+                       continue;
+               phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+                  (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+               if (!phys_disk)
+                       continue;
+               if ((mpt_raid_phys_disk_pg1(ioc,
+                   ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+                   phys_disk))) {
+                       kfree(phys_disk);
+                       continue;
+               }
+               for (j = 0; j < num_paths; j++) {
+                       if ((phys_disk->Path[j].Flags &
+                           MPI_RAID_PHYSDISK1_FLAG_INVALID))
+                               continue;
+                       if ((phys_disk->Path[j].Flags &
+                           MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+                               continue;
+                       if ((id == phys_disk->Path[j].PhysDiskID) &&
+                           (channel == phys_disk->Path[j].PhysDiskBus)) {
+                               rc = 1;
+                               kfree(phys_disk);
+                               goto out;
+                       }
+               }
+               kfree(phys_disk);
+       }
+
+
        /*
         * Check inactive list for matching phys disks
         */
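The dual-path scan added above uses a common variable-length-page idiom: RaidPhysDiskPage1_t ends in a Path[] array, so the buffer is sized as the fixed header (via offsetof()) plus num_paths RAID_PHYS_DISK1_PATH records, then filled by mpt_raid_phys_disk_pg1(). Sketch of the allocation step (pg1_alloc_sketch is an invented name):

    /* Sketch: allocate page 1 with room for num_paths path records. */
    static RaidPhysDiskPage1_t *pg1_alloc_sketch(int num_paths)
    {
            return kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
                           num_paths * sizeof(RAID_PHYS_DISK1_PATH),
                           GFP_KERNEL);
    }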
@@ -2327,8 +2166,10 @@ u8
 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
 {
        struct inactive_raid_component_info *component_info;
-       int i;
+       int i, j;
+       RaidPhysDiskPage1_t *phys_disk;
        int rc = -ENXIO;
+       int num_paths;
 
        if (!ioc->raid_data.pIocPg3)
                goto out;
@@ -2340,6 +2181,44 @@ mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id)
                }
        }
 
+       if (ioc->bus_type != SAS)
+               goto out;
+
+       /*
+        * Check if dual path
+        */
+       for (i = 0; i < ioc->raid_data.pIocPg3->NumPhysDisks; i++) {
+               num_paths = mpt_raid_phys_disk_get_num_paths(ioc,
+                   ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum);
+               if (num_paths < 2)
+                       continue;
+               phys_disk = kzalloc(offsetof(RaidPhysDiskPage1_t, Path) +
+                  (num_paths * sizeof(RAID_PHYS_DISK1_PATH)), GFP_KERNEL);
+               if (!phys_disk)
+                       continue;
+               if ((mpt_raid_phys_disk_pg1(ioc,
+                   ioc->raid_data.pIocPg3->PhysDisk[i].PhysDiskNum,
+                   phys_disk))) {
+                       kfree(phys_disk);
+                       continue;
+               }
+               for (j = 0; j < num_paths; j++) {
+                       if ((phys_disk->Path[j].Flags &
+                           MPI_RAID_PHYSDISK1_FLAG_INVALID))
+                               continue;
+                       if ((phys_disk->Path[j].Flags &
+                           MPI_RAID_PHYSDISK1_FLAG_BROKEN))
+                               continue;
+                       if ((id == phys_disk->Path[j].PhysDiskID) &&
+                           (channel == phys_disk->Path[j].PhysDiskBus)) {
+                               rc = phys_disk->PhysDiskNum;
+                               kfree(phys_disk);
+                               goto out;
+                       }
+               }
+               kfree(phys_disk);
+       }
+
        /*
         * Check inactive list for matching phys disks
         */
@@ -2457,7 +2336,6 @@ mptscsih_slave_configure(struct scsi_device *sdev)
                    sdev->ppr, sdev->inquiry_len));
 
        vdevice->configured_lun = 1;
-       mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
 
        dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                "Queue depth=%d, tflags=%x\n",
@@ -2469,6 +2347,7 @@ mptscsih_slave_configure(struct scsi_device *sdev)
                    ioc->name, vtarget->negoFlags, vtarget->maxOffset,
                    vtarget->minSyncFactor));
 
+       mptscsih_change_queue_depth(sdev, MPT_SCSI_CMD_PER_DEV_HIGH);
        dsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                "tagged %d, simple %d, ordered %d\n",
                ioc->name,sdev->tagged_supported, sdev->simple_tags,
@@ -2542,15 +2421,13 @@ mptscsih_copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR
 }
 
 /**
- * mptscsih_get_scsi_lookup
+ * mptscsih_get_scsi_lookup - retrieves scmd entry
  * @ioc: Pointer to MPT_ADAPTER structure
  * @i: index into the array
  *
- * retrieves scmd entry from ScsiLookup[] array list
- *
  * Returns the scsi_cmd pointer
- **/
-static struct scsi_cmnd *
+ */
+struct scsi_cmnd *
 mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
 {
        unsigned long   flags;
@@ -2562,15 +2439,15 @@ mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i)
 
        return scmd;
 }
+EXPORT_SYMBOL(mptscsih_get_scsi_lookup);
 
 /**
- * mptscsih_getclear_scsi_lookup
+ * mptscsih_getclear_scsi_lookup - retrieves and clears scmd entry from ScsiLookup[] array list
  * @ioc: Pointer to MPT_ADAPTER structure
  * @i: index into the array
  *
- * retrieves and clears scmd entry from ScsiLookup[] array list
- *
  * Returns the scsi_cmd pointer
+ *
  **/
 static struct scsi_cmnd *
 mptscsih_getclear_scsi_lookup(MPT_ADAPTER *ioc, int i)
@@ -2635,94 +2512,33 @@ int
 mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 {
        MPT_SCSI_HOST   *hd;
-       unsigned long    flags;
 
-       dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-           ": IOC %s_reset routed to SCSI host driver!\n",
-           ioc->name, reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
-           reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
-
-       /* If a FW reload request arrives after base installed but
-        * before all scsi hosts have been attached, then an alt_ioc
-        * may have a NULL sh pointer.
-        */
        if (ioc->sh == NULL || shost_priv(ioc->sh) == NULL)
                return 0;
-       else
-               hd = shost_priv(ioc->sh);
-
-       if (reset_phase == MPT_IOC_SETUP_RESET) {
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Setup-Diag Reset\n", ioc->name));
-
-               /* Clean Up:
-                * 1. Set Hard Reset Pending Flag
-                * All new commands go to doneQ
-                */
-               hd->resetPending = 1;
-
-       } else if (reset_phase == MPT_IOC_PRE_RESET) {
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Diag Reset\n", ioc->name));
 
-               /* 2. Flush running commands
-                *      Clean ScsiLookup (and associated memory)
-                *      AND clean mytaskQ
-                */
-
-               /* 2b. Reply to OS all known outstanding I/O commands.
-                */
+       hd = shost_priv(ioc->sh);
+       switch (reset_phase) {
+       case MPT_IOC_SETUP_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_SETUP_RESET\n", ioc->name, __func__));
+               break;
+       case MPT_IOC_PRE_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_PRE_RESET\n", ioc->name, __func__));
                mptscsih_flush_running_cmds(hd);
-
-               /* 2c. If there was an internal command that
-                * has not completed, configuration or io request,
-                * free these resources.
-                */
-               if (hd->cmdPtr) {
-                       del_timer(&hd->timer);
-                       mpt_free_msg_frame(ioc, hd->cmdPtr);
-               }
-
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Pre-Reset complete.\n", ioc->name));
-
-       } else {
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Diag Reset\n", ioc->name));
-
-               /* Once a FW reload begins, all new OS commands are
-                * redirected to the doneQ w/ a reset status.
-                * Init all control structures.
-                */
-
-               /* 2. Chain Buffer initialization
-                */
-
-               /* 4. Renegotiate to all devices, if SPI
-                */
-
-               /* 5. Enable new commands to be posted
-                */
-               spin_lock_irqsave(&ioc->FreeQlock, flags);
-               hd->tmPending = 0;
-               spin_unlock_irqrestore(&ioc->FreeQlock, flags);
-               hd->resetPending = 0;
-               hd->tmState = TM_STATE_NONE;
-
-               /* 6. If there was an internal command,
-                * wake this process up.
-                */
-               if (hd->cmdPtr) {
-                       /*
-                        * Wake up the original calling thread
-                        */
-                       hd->pLocal = &hd->localReply;
-                       hd->pLocal->completion = MPT_SCANDV_DID_RESET;
-                       hd->scandv_wait_done = 1;
-                       wake_up(&hd->scandv_waitq);
-                       hd->cmdPtr = NULL;
+               break;
+       case MPT_IOC_POST_RESET:
+               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: MPT_IOC_POST_RESET\n", ioc->name, __func__));
+               if (ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING) {
+                       ioc->internal_cmds.status |=
+                               MPT_MGMT_STATUS_DID_IOCRESET;
+                       complete(&ioc->internal_cmds.done);
                }
-
-               dtmprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Post-Reset complete.\n", ioc->name));
-
+               break;
+       default:
+               break;
        }
-
        return 1;               /* currently means nothing really */
 }
 
@@ -2730,55 +2546,16 @@ mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
 int
 mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
 {
-       MPT_SCSI_HOST *hd;
        u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
 
-       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
-                       ioc->name, event));
-
-       if (ioc->sh == NULL ||
-               ((hd = shost_priv(ioc->sh)) == NULL))
-               return 1;
-
-       switch (event) {
-       case MPI_EVENT_UNIT_ATTENTION:                  /* 03 */
-               /* FIXME! */
-               break;
-       case MPI_EVENT_IOC_BUS_RESET:                   /* 04 */
-       case MPI_EVENT_EXT_BUS_RESET:                   /* 05 */
-               if (hd && (ioc->bus_type == SPI) && (hd->soft_resets < -1))
-                       hd->soft_resets++;
-               break;
-       case MPI_EVENT_LOGOUT:                          /* 09 */
-               /* FIXME! */
-               break;
-
-       case MPI_EVENT_RESCAN:                          /* 06 */
-               break;
-
-               /*
-                *  CHECKME! Don't think we need to do
-                *  anything for these, but...
-                */
-       case MPI_EVENT_LINK_STATUS_CHANGE:              /* 07 */
-       case MPI_EVENT_LOOP_STATE_CHANGE:               /* 08 */
-               /*
-                *  CHECKME!  Falling thru...
-                */
-               break;
-
-       case MPI_EVENT_INTEGRATED_RAID:                 /* 0B */
-               break;
+       devtverboseprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+               "MPT event (=%02Xh) routed to SCSI host driver!\n",
+               ioc->name, event));
 
-       case MPI_EVENT_NONE:                            /* 00 */
-       case MPI_EVENT_LOG_DATA:                        /* 01 */
-       case MPI_EVENT_STATE_CHANGE:                    /* 02 */
-       case MPI_EVENT_EVENT_CHANGE:                    /* 0A */
-       default:
-               dprintk(ioc, printk(MYIOC_s_DEBUG_FMT ": Ignoring event (=%02Xh)\n",
-                   ioc->name, event));
-               break;
-       }
+       if ((event == MPI_EVENT_IOC_BUS_RESET ||
+           event == MPI_EVENT_EXT_BUS_RESET) &&
+           (ioc->bus_type == SPI) && (ioc->soft_resets < -1))
+                       ioc->soft_resets++;
 
        return 1;               /* currently means nothing really */
 }
@@ -2809,153 +2586,44 @@ mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
  *     Used ONLY for DV and other internal commands.
  */
 int
-mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+                               MPT_FRAME_HDR *reply)
 {
-       MPT_SCSI_HOST   *hd;
        SCSIIORequest_t *pReq;
-       int              completionCode;
+       SCSIIOReply_t   *pReply;
+       u8               cmd;
        u16              req_idx;
+       u8      *sense_data;
+       int              sz;
 
-       hd = shost_priv(ioc->sh);
-
-       if ((mf == NULL) ||
-           (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
-               printk(MYIOC_s_ERR_FMT
-                       "ScanDvComplete, %s req frame ptr! (=%p)\n",
-                               ioc->name, mf?"BAD":"NULL", (void *) mf);
-               goto wakeup;
-       }
-
-       del_timer(&hd->timer);
-       req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
-       mptscsih_set_scsi_lookup(ioc, req_idx, NULL);
-       pReq = (SCSIIORequest_t *) mf;
+       ioc->internal_cmds.status |= MPT_MGMT_STATUS_COMMAND_GOOD;
+       ioc->internal_cmds.completion_code = MPT_SCANDV_GOOD;
+       if (!reply)
+               goto out;
 
-       if (mf != hd->cmdPtr) {
-               printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n",
-                               ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx);
+       pReply = (SCSIIOReply_t *) reply;
+       pReq = (SCSIIORequest_t *) req;
+       ioc->internal_cmds.completion_code =
+           mptscsih_get_completion_code(ioc, req, reply);
+       ioc->internal_cmds.status |= MPT_MGMT_STATUS_RF_VALID;
+       memcpy(ioc->internal_cmds.reply, reply,
+           min(MPT_DEFAULT_FRAME_SIZE, 4 * reply->u.reply.MsgLength));
+       cmd = reply->u.hdr.Function;
+       if (((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+           (cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) &&
+           (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)) {
+               req_idx = le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
+               sense_data = ((u8 *)ioc->sense_buf_pool +
+                   (req_idx * MPT_SENSE_BUFFER_ALLOC));
+               sz = min_t(int, pReq->SenseBufferLength,
+                   MPT_SENSE_BUFFER_ALLOC);
+               memcpy(ioc->internal_cmds.sense, sense_data, sz);
        }
-       hd->cmdPtr = NULL;
-
-       ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n",
-                       ioc->name, mf, mr, req_idx));
-
-       hd->pLocal = &hd->localReply;
-       hd->pLocal->scsiStatus = 0;
-
-       /* If target struct exists, clear sense valid flag.
-        */
-       if (mr == NULL) {
-               completionCode = MPT_SCANDV_GOOD;
-       } else {
-               SCSIIOReply_t   *pReply;
-               u16              status;
-               u8               scsi_status;
-
-               pReply = (SCSIIOReply_t *) mr;
-
-               status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
-               scsi_status = pReply->SCSIStatus;
-
-
-               switch(status) {
-
-               case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:       /* 0x0043 */
-                       completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
-                       break;
-
-               case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:          /* 0x0046 */
-               case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:        /* 0x0048 */
-               case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:         /* 0x004B */
-               case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:         /* 0x004C */
-                       completionCode = MPT_SCANDV_DID_RESET;
-                       break;
-
-               case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:          /* 0x0045 */
-               case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:        /* 0x0040 */
-               case MPI_IOCSTATUS_SUCCESS:                     /* 0x0000 */
-                       if (pReply->Function == MPI_FUNCTION_CONFIG) {
-                               ConfigReply_t *pr = (ConfigReply_t *)mr;
-                               completionCode = MPT_SCANDV_GOOD;
-                               hd->pLocal->header.PageVersion = pr->Header.PageVersion;
-                               hd->pLocal->header.PageLength = pr->Header.PageLength;
-                               hd->pLocal->header.PageNumber = pr->Header.PageNumber;
-                               hd->pLocal->header.PageType = pr->Header.PageType;
-
-                       } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
-                               /* If the RAID Volume request is successful,
-                                * return GOOD, else indicate that
-                                * some type of error occurred.
-                                */
-                               MpiRaidActionReply_t    *pr = (MpiRaidActionReply_t *)mr;
-                               if (le16_to_cpu(pr->ActionStatus) == MPI_RAID_ACTION_ASTATUS_SUCCESS)
-                                       completionCode = MPT_SCANDV_GOOD;
-                               else
-                                       completionCode = MPT_SCANDV_SOME_ERROR;
-                               memcpy(hd->pLocal->sense, pr, sizeof(hd->pLocal->sense));
-
-                       } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
-                               u8              *sense_data;
-                               int              sz;
-
-                               /* save sense data in global structure
-                                */
-                               completionCode = MPT_SCANDV_SENSE;
-                               hd->pLocal->scsiStatus = scsi_status;
-                               sense_data = ((u8 *)ioc->sense_buf_pool +
-                                       (req_idx * MPT_SENSE_BUFFER_ALLOC));
-
-                               sz = min_t(int, pReq->SenseBufferLength,
-                                                       SCSI_STD_SENSE_BYTES);
-                               memcpy(hd->pLocal->sense, sense_data, sz);
-
-                               ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "  Check Condition, sense ptr %p\n",
-                                   ioc->name, sense_data));
-                       } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
-                               if (pReq->CDB[0] == INQUIRY)
-                                       completionCode = MPT_SCANDV_ISSUE_SENSE;
-                               else
-                                       completionCode = MPT_SCANDV_DID_RESET;
-                       }
-                       else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
-                               completionCode = MPT_SCANDV_DID_RESET;
-                       else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
-                               completionCode = MPT_SCANDV_DID_RESET;
-                       else {
-                               completionCode = MPT_SCANDV_GOOD;
-                               hd->pLocal->scsiStatus = scsi_status;
-                       }
-                       break;
-
-               case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:         /* 0x0047 */
-                       if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
-                               completionCode = MPT_SCANDV_DID_RESET;
-                       else
-                               completionCode = MPT_SCANDV_SOME_ERROR;
-                       break;
-
-               default:
-                       completionCode = MPT_SCANDV_SOME_ERROR;
-                       break;
-
-               }       /* switch(status) */
-
-       } /* end of address reply case */
-
-       hd->pLocal->completion = completionCode;
-
-       /* MF and RF are freed in mpt_interrupt
-        */
-wakeup:
-       /* Free Chain buffers (will never chain) in scan or dv */
-       //mptscsih_freeChainBuffers(ioc, req_idx);
-
-       /*
-        * Wake up the original calling thread
-        */
-       hd->scandv_wait_done = 1;
-       wake_up(&hd->scandv_waitq);
-
+ out:
+       if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_PENDING))
+               return 0;
+       ioc->internal_cmds.status &= ~MPT_MGMT_STATUS_PENDING;
+       complete(&ioc->internal_cmds.done);
        return 1;
 }
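For the autosense copy in the rewritten mptscsih_scandv_complete() above: each request frame owns a fixed MPT_SENSE_BUFFER_ALLOC-byte slot in ioc->sense_buf_pool, addressed by the req_idx recovered from the frame header. Sketch (sense_slot_sketch is an invented name):

    /* Sketch: locate the per-request sense buffer slot. */
    static u8 *sense_slot_sketch(MPT_ADAPTER *ioc, u16 req_idx)
    {
            return (u8 *)ioc->sense_buf_pool +
                    (req_idx * MPT_SENSE_BUFFER_ALLOC);
    }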
 
@@ -3004,6 +2672,95 @@ mptscsih_timer_expired(unsigned long data)
        return;
 }
 
+/**
+ *     mptscsih_get_completion_code - translate a reply frame into an MPT_SCANDV_* code
+ *     @ioc: Pointer to MPT_ADAPTER structure
+ *     @req: Pointer to the original request frame
+ *     @reply: Pointer to the reply frame to evaluate
+ *
+ **/
+static int
+mptscsih_get_completion_code(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req,
+                               MPT_FRAME_HDR *reply)
+{
+       SCSIIOReply_t   *pReply;
+       MpiRaidActionReply_t *pr;
+       u8               scsi_status;
+       u16              status;
+       int              completion_code;
+
+       pReply = (SCSIIOReply_t *)reply;
+       status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+       scsi_status = pReply->SCSIStatus;
+
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh,"
+           "IOCLogInfo=%08xh\n", ioc->name, status, pReply->SCSIState,
+           scsi_status, le32_to_cpu(pReply->IOCLogInfo)));
+
+       switch (status) {
+
+       case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:       /* 0x0043 */
+               completion_code = MPT_SCANDV_SELECTION_TIMEOUT;
+               break;
+
+       case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:          /* 0x0046 */
+       case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:        /* 0x0048 */
+       case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:         /* 0x004B */
+       case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:         /* 0x004C */
+               completion_code = MPT_SCANDV_DID_RESET;
+               break;
+
+       case MPI_IOCSTATUS_BUSY:
+       case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:
+               completion_code = MPT_SCANDV_BUSY;
+               break;
+
+       case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:          /* 0x0045 */
+       case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:        /* 0x0040 */
+       case MPI_IOCSTATUS_SUCCESS:                     /* 0x0000 */
+               if (pReply->Function == MPI_FUNCTION_CONFIG) {
+                       completion_code = MPT_SCANDV_GOOD;
+               } else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
+                       pr = (MpiRaidActionReply_t *)reply;
+                       if (le16_to_cpu(pr->ActionStatus) ==
+                               MPI_RAID_ACTION_ASTATUS_SUCCESS)
+                               completion_code = MPT_SCANDV_GOOD;
+                       else
+                               completion_code = MPT_SCANDV_SOME_ERROR;
+               } else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID)
+                       completion_code = MPT_SCANDV_SENSE;
+               else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
+                       if (req->u.scsireq.CDB[0] == INQUIRY)
+                               completion_code = MPT_SCANDV_ISSUE_SENSE;
+                       else
+                               completion_code = MPT_SCANDV_DID_RESET;
+               } else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
+                       completion_code = MPT_SCANDV_DID_RESET;
+               else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+                       completion_code = MPT_SCANDV_DID_RESET;
+               else if (scsi_status == MPI_SCSI_STATUS_BUSY)
+                       completion_code = MPT_SCANDV_BUSY;
+               else
+                       completion_code = MPT_SCANDV_GOOD;
+               break;
+
+       case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:         /* 0x0047 */
+               if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+                       completion_code = MPT_SCANDV_DID_RESET;
+               else
+                       completion_code = MPT_SCANDV_SOME_ERROR;
+               break;
+       default:
+               completion_code = MPT_SCANDV_SOME_ERROR;
+               break;
+
+       }       /* switch(status) */
+
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "  completionCode set to %08xh\n", ioc->name, completion_code));
+       return completion_code;
+}
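
The switch above collapses the many MPI IOCStatus values into the driver's
small MPT_SCANDV_* set, so callers of the internal-command path branch on a
single code. A minimal userspace sketch of the same collapsing pattern (the
hex values are the ones commented in the cases above):

	#include <stdio.h>

	enum scandv { SCANDV_GOOD, SCANDV_BUSY, SCANDV_DID_RESET,
		      SCANDV_SOME_ERROR };

	static enum scandv classify(unsigned short ioc_status)
	{
		switch (ioc_status) {
		case 0x0000:            /* SUCCESS */
		case 0x0040:            /* SCSI_RECOVERED_ERROR */
			return SCANDV_GOOD;
		case 0x0048:            /* SCSI_TASK_TERMINATED */
		case 0x004B:            /* SCSI_IOC_TERMINATED */
			return SCANDV_DID_RESET;
		default:
			return SCANDV_SOME_ERROR;
		}
	}

	int main(void)
	{
		printf("%d\n", classify(0x0048)); /* 2 == SCANDV_DID_RESET */
		return 0;
	}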
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
 /**
@@ -3030,22 +2787,27 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 {
        MPT_FRAME_HDR   *mf;
        SCSIIORequest_t *pScsiReq;
-       SCSIIORequest_t  ReqCopy;
        int              my_idx, ii, dir;
-       int              rc, cmdTimeout;
-       int             in_isr;
+       int              timeout;
        char             cmdLen;
        char             CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
-       char             cmd = io->cmd;
-       MPT_ADAPTER     *ioc = hd->ioc;
+       u8               cmd = io->cmd;
+       MPT_ADAPTER *ioc = hd->ioc;
+       int              ret = 0;
+       unsigned long    timeleft;
+       unsigned long    flags;
 
-       in_isr = in_interrupt();
-       if (in_isr) {
-               dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Internal SCSI IO request not allowed in ISR context!\n",
-                                       ioc->name));
-               return -EPERM;
+       /* don't send internal command during diag reset */
+       spin_lock_irqsave(&ioc->taskmgmt_lock, flags);
+       if (ioc->ioc_reset_in_progress) {
+               spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                       "%s: busy with host reset\n", ioc->name, __func__));
+               return MPT_SCANDV_BUSY;
        }
+       spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
+       mutex_lock(&ioc->internal_cmds.mutex);
 
        /* Set command specific information
         */
@@ -3055,13 +2817,13 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
                dir = MPI_SCSIIO_CONTROL_READ;
                CDB[0] = cmd;
                CDB[4] = io->size;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case TEST_UNIT_READY:
                cmdLen = 6;
                dir = MPI_SCSIIO_CONTROL_READ;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case START_STOP:
@@ -3069,7 +2831,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
                dir = MPI_SCSIIO_CONTROL_READ;
                CDB[0] = cmd;
                CDB[4] = 1;     /*Spin up the disk */
-               cmdTimeout = 15;
+               timeout = 15;
                break;
 
        case REQUEST_SENSE:
@@ -3077,7 +2839,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
                CDB[0] = cmd;
                CDB[4] = io->size;
                dir = MPI_SCSIIO_CONTROL_READ;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case READ_BUFFER:
@@ -3096,7 +2858,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
                CDB[6] = (io->size >> 16) & 0xFF;
                CDB[7] = (io->size >>  8) & 0xFF;
                CDB[8] = io->size & 0xFF;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case WRITE_BUFFER:
@@ -3111,21 +2873,21 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
                CDB[6] = (io->size >> 16) & 0xFF;
                CDB[7] = (io->size >>  8) & 0xFF;
                CDB[8] = io->size & 0xFF;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case RESERVE:
                cmdLen = 6;
                dir = MPI_SCSIIO_CONTROL_READ;
                CDB[0] = cmd;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case RELEASE:
                cmdLen = 6;
                dir = MPI_SCSIIO_CONTROL_READ;
                CDB[0] = cmd;
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        case SYNCHRONIZE_CACHE:
@@ -3133,20 +2895,23 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
                dir = MPI_SCSIIO_CONTROL_READ;
                CDB[0] = cmd;
 //             CDB[1] = 0x02;  /* set immediate bit */
-               cmdTimeout = 10;
+               timeout = 10;
                break;
 
        default:
                /* Error Case */
-               return -EFAULT;
+               ret = -EFAULT;
+               goto out;
        }
 
        /* Get and Populate a free Frame
+        * MsgContext set in mpt_get_msg_frame call
         */
        if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "No msg frames!\n",
-                   ioc->name));
-               return -EBUSY;
+               dfailprintk(ioc, printk(MYIOC_s_WARN_FMT "%s: No msg frames!\n",
+                   ioc->name, __func__));
+               ret = MPT_SCANDV_BUSY;
+               goto out;
        }
 
        pScsiReq = (SCSIIORequest_t *) mf;
@@ -3172,7 +2937,7 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 
        pScsiReq->Reserved = 0;
 
-       pScsiReq->MsgFlags = mpt_msg_flags();
+       pScsiReq->MsgFlags = mpt_msg_flags(ioc);
        /* MsgContext set in mpt_get_msg_frame call  */
 
        int_to_scsilun(io->lun, (struct scsi_lun *)pScsiReq->LUN);
@@ -3184,74 +2949,58 @@ mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
 
        if (cmd == REQUEST_SENSE) {
                pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
-               ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Untagged! 0x%2x\n",
-                       ioc->name, cmd));
+               devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: Untagged! 0x%02x\n", ioc->name, __func__, cmd));
        }
 
-       for (ii=0; ii < 16; ii++)
+       for (ii = 0; ii < 16; ii++)
                pScsiReq->CDB[ii] = CDB[ii];
 
        pScsiReq->DataLength = cpu_to_le32(io->size);
        pScsiReq->SenseBufferLowAddr = cpu_to_le32(ioc->sense_buf_low_dma
                                           + (my_idx * MPT_SENSE_BUFFER_ALLOC));
 
-       ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Sending Command 0x%x for (%d:%d:%d)\n",
-                       ioc->name, cmd, io->channel, io->id, io->lun));
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+           "%s: Sending Command 0x%02x for fw_channel=%d fw_id=%d lun=%d\n",
+           ioc->name, __func__, cmd, io->channel, io->id, io->lun));
 
-       if (dir == MPI_SCSIIO_CONTROL_READ) {
-               mpt_add_sge((char *) &pScsiReq->SGL,
-                       MPT_SGE_FLAGS_SSIMPLE_READ | io->size,
-                       io->data_dma);
-       } else {
-               mpt_add_sge((char *) &pScsiReq->SGL,
-                       MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
-                       io->data_dma);
-       }
-
-       /* The ISR will free the request frame, but we need
-        * the information to initialize the target. Duplicate.
-        */
-       memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
-
-       /* Issue this command after:
-        *      finish init
-        *      add timer
-        * Wait until the reply has been received
-        *  ScsiScanDvCtx callback function will
-        *      set hd->pLocal;
-        *      set scandv_wait_done and call wake_up
-        */
-       hd->pLocal = NULL;
-       hd->timer.expires = jiffies + HZ*cmdTimeout;
-       hd->scandv_wait_done = 0;
-
-       /* Save cmd pointer, for resource free if timeout or
-        * FW reload occurs
-        */
-       hd->cmdPtr = mf;
+       if (dir == MPI_SCSIIO_CONTROL_READ)
+               ioc->add_sge((char *) &pScsiReq->SGL,
+                   MPT_SGE_FLAGS_SSIMPLE_READ | io->size, io->data_dma);
+       else
+               ioc->add_sge((char *) &pScsiReq->SGL,
+                   MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size, io->data_dma);
 
-       add_timer(&hd->timer);
+       INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
        mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-       wait_event(hd->scandv_waitq, hd->scandv_wait_done);
-
-       if (hd->pLocal) {
-               rc = hd->pLocal->completion;
-               hd->pLocal->skip = 0;
-
-               /* Always set fatal error codes in some cases.
-                */
-               if (rc == MPT_SCANDV_SELECTION_TIMEOUT)
-                       rc = -ENXIO;
-               else if (rc == MPT_SCANDV_SOME_ERROR)
-                       rc =  -rc;
-       } else {
-               rc = -EFAULT;
-               /* This should never happen. */
-               ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "_do_cmd: Null pLocal!!!\n",
-                               ioc->name));
+       timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done,
+           timeout*HZ);
+       if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               ret = MPT_SCANDV_DID_RESET;
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT
+                   "%s: TIMED OUT for cmd=0x%02x\n", ioc->name, __func__,
+                   cmd));
+               if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET) {
+                       mpt_free_msg_frame(ioc, mf);
+                       goto out;
+               }
+               if (!timeleft) {
+                       printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+                           ioc->name, __func__);
+                       mpt_HardResetHandler(ioc, CAN_SLEEP);
+                       mpt_free_msg_frame(ioc, mf);
+               }
+               goto out;
        }
 
-       return rc;
+       ret = ioc->internal_cmds.completion_code;
+       devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: success, rc=0x%02x\n",
+                       ioc->name, __func__, ret));
+
+ out:
+       CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+       mutex_unlock(&ioc->internal_cmds.mutex);
+       return ret;
 }
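
The rewritten mptscsih_do_cmd() drops the private timer/waitqueue pair in
favor of the generic flow: serialize callers on internal_cmds.mutex, mark
the status PENDING, post the frame, then block in
wait_for_completion_timeout() until the reply handler at the top of this
diff clears PENDING and completes. A self-contained userspace sketch of that
shape, with pthreads standing in for the kernel completion API (all names
illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <time.h>

	#define ST_PENDING 0x1
	#define ST_GOOD    0x2

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
	static unsigned int status;

	static void *reply_handler(void *arg)   /* stands in for the ISR */
	{
		pthread_mutex_lock(&lock);
		status &= ~ST_PENDING;
		status |= ST_GOOD;
		pthread_cond_signal(&done);
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		struct timespec ts;

		clock_gettime(CLOCK_REALTIME, &ts);
		ts.tv_sec += 10;                /* like timeout*HZ above */

		pthread_mutex_lock(&lock);
		status = ST_PENDING;            /* INITIALIZE_MGMT_STATUS */
		pthread_create(&t, NULL, reply_handler, NULL);
		while (status & ST_PENDING)
			if (pthread_cond_timedwait(&done, &lock, &ts))
				break;  /* timed out: driver would hard-reset */
		puts(status & ST_GOOD ? "completed" : "timed out");
		status = 0;                     /* CLEAR_MGMT_STATUS */
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
		return 0;
	}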
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
@@ -3491,6 +3240,7 @@ struct device_attribute *mptscsih_host_attrs[] = {
        &dev_attr_debug_level,
        NULL,
 };
+
 EXPORT_SYMBOL(mptscsih_host_attrs);
 
 EXPORT_SYMBOL(mptscsih_remove);
@@ -3516,6 +3266,5 @@ EXPORT_SYMBOL(mptscsih_event_process);
 EXPORT_SYMBOL(mptscsih_ioc_reset);
 EXPORT_SYMBOL(mptscsih_change_queue_depth);
 EXPORT_SYMBOL(mptscsih_timer_expired);
-EXPORT_SYMBOL(mptscsih_TMHandler);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
index 319aa3033371999e8c75b2f151a84942744d04fb..eb3f677528ac703b1dac56f6795441fc155108df 100644 (file)
@@ -60,6 +60,7 @@
 #define MPT_SCANDV_SELECTION_TIMEOUT   (0x00000008)
 #define MPT_SCANDV_ISSUE_SENSE         (0x00000010)
 #define MPT_SCANDV_FALLBACK            (0x00000020)
+#define MPT_SCANDV_BUSY                        (0x00000040)
 
 #define MPT_SCANDV_MAX_RETRIES         (10)
 
@@ -89,6 +90,7 @@
 
 #endif
 
+
 typedef struct _internal_cmd {
        char            *data;          /* data pointer */
        dma_addr_t      data_dma;       /* data dma address */
@@ -112,6 +114,8 @@ extern int mptscsih_resume(struct pci_dev *pdev);
 extern int mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset, int length, int func);
 extern const char * mptscsih_info(struct Scsi_Host *SChost);
 extern int mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *));
+extern int mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel,
+       u8 id, int lun, int ctx2abort, ulong timeout);
 extern void mptscsih_slave_destroy(struct scsi_device *device);
 extern int mptscsih_slave_configure(struct scsi_device *device);
 extern int mptscsih_abort(struct scsi_cmnd * SCpnt);
@@ -126,7 +130,8 @@ extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pE
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
 extern void mptscsih_timer_expired(unsigned long data);
-extern int mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 id, int lun, int ctx2abort, ulong timeout);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
+extern struct scsi_cmnd        *mptscsih_get_scsi_lookup(MPT_ADAPTER *ioc, int i);
+extern void mptscsih_taskmgmt_response_code(MPT_ADAPTER *ioc, u8 response_code);
index 61620144e49c32c82a834e9911035dc675691f1e..c5b808fd55ba6fc2b523939836abe347e85aeeea 100644 (file)
@@ -300,7 +300,7 @@ mptspi_writeIOCPage4(MPT_SCSI_HOST *hd, u8 channel , u8 id)
        flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
                (IOCPage4Ptr->Header.PageLength + ii) * 4;
 
-       mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+       ioc->add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
 
        ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
                "writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
@@ -614,19 +614,24 @@ static void mptspi_read_parameters(struct scsi_target *starget)
        spi_width(starget) = (nego & MPI_SCSIDEVPAGE0_NP_WIDE) ? 1 : 0;
 }
 
-static int
+int
 mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
 {
+       MPT_ADAPTER     *ioc = hd->ioc;
        MpiRaidActionRequest_t  *pReq;
        MPT_FRAME_HDR           *mf;
-       MPT_ADAPTER *ioc = hd->ioc;
+       int                     ret;
+       unsigned long           timeleft;
+
+       mutex_lock(&ioc->internal_cmds.mutex);
 
        /* Get and Populate a free Frame
         */
        if ((mf = mpt_get_msg_frame(ioc->InternalCtx, ioc)) == NULL) {
-               ddvprintk(ioc, printk(MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
-                                       ioc->name));
-               return -EAGAIN;
+               dfailprintk(hd->ioc, printk(MYIOC_s_WARN_FMT
+                       "%s: no msg frames!\n", ioc->name, __func__));
+               ret = -EAGAIN;
+               goto out;
        }
        pReq = (MpiRaidActionRequest_t *)mf;
        if (quiesce)
@@ -643,29 +648,36 @@ mptscsih_quiesce_raid(MPT_SCSI_HOST *hd, int quiesce, u8 channel, u8 id)
        pReq->Reserved2 = 0;
        pReq->ActionDataWord = 0; /* Reserved for this action */
 
-       mpt_add_sge((char *)&pReq->ActionDataSGE,
+       ioc->add_sge((char *)&pReq->ActionDataSGE,
                MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
 
        ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "RAID Volume action=%x channel=%d id=%d\n",
                        ioc->name, pReq->Action, channel, id));
 
-       hd->pLocal = NULL;
-       hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
-       hd->scandv_wait_done = 0;
-
-       /* Save cmd pointer, for resource free if timeout or
-        * FW reload occurs
-        */
-       hd->cmdPtr = mf;
-
-       add_timer(&hd->timer);
+       INITIALIZE_MGMT_STATUS(ioc->internal_cmds.status)
        mpt_put_msg_frame(ioc->InternalCtx, ioc, mf);
-       wait_event(hd->scandv_waitq, hd->scandv_wait_done);
+       timeleft = wait_for_completion_timeout(&ioc->internal_cmds.done, 10*HZ);
+       if (!(ioc->internal_cmds.status & MPT_MGMT_STATUS_COMMAND_GOOD)) {
+               ret = -ETIME;
+               dfailprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: TIMED OUT!\n",
+                   ioc->name, __func__));
+               if (ioc->internal_cmds.status & MPT_MGMT_STATUS_DID_IOCRESET)
+                       goto out;
+               if (!timeleft) {
+                       printk(MYIOC_s_WARN_FMT "Issuing Reset from %s!!\n",
+                           ioc->name, __func__);
+                       mpt_HardResetHandler(ioc, CAN_SLEEP);
+                       mpt_free_msg_frame(ioc, mf);
+               }
+               goto out;
+       }
 
-       if ((hd->pLocal == NULL) || (hd->pLocal->completion != 0))
-               return -1;
+       ret = ioc->internal_cmds.completion_code;
 
-       return 0;
+ out:
+       CLEAR_MGMT_STATUS(ioc->internal_cmds.status)
+       mutex_unlock(&ioc->internal_cmds.mutex);
+       return ret;
 }
 
 static void mptspi_dv_device(struct _MPT_SCSI_HOST *hd,
@@ -1423,17 +1435,15 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * A slightly different algorithm is required for
         * 64bit SGEs.
         */
-       scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
-       if (sizeof(dma_addr_t) == sizeof(u64)) {
+       scale = ioc->req_sz/ioc->SGE_size;
+       if (ioc->sg_addr_size == sizeof(u64)) {
                numSGE = (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
-                 (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
-                 sizeof(u32));
+                 (ioc->req_sz - 60) / ioc->SGE_size;
        } else {
                numSGE = 1 + (scale - 1) *
                  (ioc->facts.MaxChainDepth-1) + scale +
-                 (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
-                 sizeof(u32));
+                 (ioc->req_sz - 64) / ioc->SGE_size;
        }
 
        if (numSGE < sh->sg_tablesize) {
@@ -1464,9 +1474,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
        /* Clear the TM flags
         */
-       hd->tmPending = 0;
-       hd->tmState = TM_STATE_NONE;
-       hd->resetPending = 0;
        hd->abortSCpnt = NULL;
 
        /* Clear the pointer used to store
@@ -1493,8 +1500,6 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                mpt_saf_te));
        ioc->spi_data.noQas = 0;
 
-       init_waitqueue_head(&hd->scandv_waitq);
-       hd->scandv_wait_done = 0;
        hd->last_queue_full = 0;
        hd->spi_pending = 0;
 
@@ -1514,7 +1519,7 @@ mptspi_probe(struct pci_dev *pdev, const struct pci_device_id *id)
         * issue internal bus reset
         */
        if (ioc->spi_data.bus_reset)
-               mptscsih_TMHandler(hd,
+               mptscsih_IssueTaskMgmt(hd,
                    MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
                    0, 0, 0, 0, 5);
 
index 214a92d1ef75938fd58f9adea4289074f5e11121..f3c4a3b910bb0d1c16620695662b86136a6cd061 100644 (file)
@@ -2264,6 +2264,17 @@ config BNX2
          To compile this driver as a module, choose M here: the module
          will be called bnx2.  This is recommended.
 
+config CNIC
+       tristate "Broadcom CNIC support"
+       depends on BNX2
+       depends on UIO
+       help
+         This driver supports offload features of Broadcom NetXtreme II
+         gigabit Ethernet cards.
+
+         To compile this driver as a module, choose M here: the module
+         will be called cnic.  This is recommended.
+
 config SPIDER_NET
        tristate "Spider Gigabit Ethernet driver"
        depends on PCI && (PPC_IBM_CELL_BLADE || PPC_CELLEB)
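
A tristate symbol such as CNIC reaches C code as CONFIG_CNIC when built in
and CONFIG_CNIC_MODULE when built as a module, which is why bnx2.c below
guards its hooks with a test for either macro. The same guard, compilable
outside the kernel:

	#include <stdio.h>

	/* Kconfig emits one of these for =y or =m, nothing for =n: */
	/* #define CONFIG_CNIC 1 */
	/* #define CONFIG_CNIC_MODULE 1 */

	#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
	#define BCM_CNIC 1
	#endif

	int main(void)
	{
	#ifdef BCM_CNIC
		puts("cnic hooks compiled in");
	#else
		puts("cnic hooks compiled out");
	#endif
		return 0;
	}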
index a1c25cb4669fb8b9a95b7a1410355423d46b5599..db30ebd7b262336bb3949da5db58e7173d4bfa64 100644 (file)
@@ -73,6 +73,7 @@ obj-$(CONFIG_STNIC) += stnic.o 8390.o
 obj-$(CONFIG_FEALNX) += fealnx.o
 obj-$(CONFIG_TIGON3) += tg3.o
 obj-$(CONFIG_BNX2) += bnx2.o
+obj-$(CONFIG_CNIC) += cnic.o
 obj-$(CONFIG_BNX2X) += bnx2x.o
 bnx2x-objs := bnx2x_main.o bnx2x_link.o
 spidernet-y += spider_net.o spider_net_ethtool.o
index b0cb29d4cc01e735b7855e99f7e3a7c81650d3db..3f5fcb0156a180a49977f200b7a16fe5e19f6a38 100644 (file)
 #include <linux/firmware.h>
 #include <linux/log2.h>
 
+#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
+#define BCM_CNIC 1
+#include "cnic_if.h"
+#endif
 #include "bnx2.h"
 #include "bnx2_fw.h"
 
@@ -315,6 +319,158 @@ bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
        spin_unlock_bh(&bp->indirect_lock);
 }
 
+#ifdef BCM_CNIC
+static int
+bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct drv_ctl_io *io = &info->data.io;
+
+       switch (info->cmd) {
+       case DRV_CTL_IO_WR_CMD:
+               bnx2_reg_wr_ind(bp, io->offset, io->data);
+               break;
+       case DRV_CTL_IO_RD_CMD:
+               io->data = bnx2_reg_rd_ind(bp, io->offset);
+               break;
+       case DRV_CTL_CTX_WR_CMD:
+               bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
+{
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+       int sb_id;
+
+       if (bp->flags & BNX2_FLAG_USING_MSIX) {
+               cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
+               bnapi->cnic_present = 0;
+               sb_id = bp->irq_nvecs;
+               cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
+       } else {
+               cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
+               bnapi->cnic_tag = bnapi->last_status_idx;
+               bnapi->cnic_present = 1;
+               sb_id = 0;
+               cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
+       }
+
+       cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
+       cp->irq_arr[0].status_blk = (void *)
+               ((unsigned long) bnapi->status_blk.msi +
+               (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
+       cp->irq_arr[0].status_blk_num = sb_id;
+       cp->num_irq = 1;
+}
+
+static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
+                             void *data)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+       if (ops == NULL)
+               return -EINVAL;
+
+       if (cp->drv_state & CNIC_DRV_STATE_REGD)
+               return -EBUSY;
+
+       bp->cnic_data = data;
+       rcu_assign_pointer(bp->cnic_ops, ops);
+
+       cp->num_irq = 0;
+       cp->drv_state = CNIC_DRV_STATE_REGD;
+
+       bnx2_setup_cnic_irq_info(bp);
+
+       return 0;
+}
+
+static int bnx2_unregister_cnic(struct net_device *dev)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+       cp->drv_state = 0;
+       bnapi->cnic_present = 0;
+       rcu_assign_pointer(bp->cnic_ops, NULL);
+       synchronize_rcu();
+       return 0;
+}
+
+struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
+{
+       struct bnx2 *bp = netdev_priv(dev);
+       struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
+
+       cp->drv_owner = THIS_MODULE;
+       cp->chip_id = bp->chip_id;
+       cp->pdev = bp->pdev;
+       cp->io_base = bp->regview;
+       cp->drv_ctl = bnx2_drv_ctl;
+       cp->drv_register_cnic = bnx2_register_cnic;
+       cp->drv_unregister_cnic = bnx2_unregister_cnic;
+
+       return cp;
+}
+EXPORT_SYMBOL(bnx2_cnic_probe);
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+       struct cnic_ops *c_ops;
+       struct cnic_ctl_info info;
+
+       rcu_read_lock();
+       c_ops = rcu_dereference(bp->cnic_ops);
+       if (c_ops) {
+               info.cmd = CNIC_CTL_STOP_CMD;
+               c_ops->cnic_ctl(bp->cnic_data, &info);
+       }
+       rcu_read_unlock();
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+       struct cnic_ops *c_ops;
+       struct cnic_ctl_info info;
+
+       rcu_read_lock();
+       c_ops = rcu_dereference(bp->cnic_ops);
+       if (c_ops) {
+               if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
+                       struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
+
+                       bnapi->cnic_tag = bnapi->last_status_idx;
+               }
+               info.cmd = CNIC_CTL_START_CMD;
+               c_ops->cnic_ctl(bp->cnic_data, &info);
+       }
+       rcu_read_unlock();
+}
+
+#else
+
+static void
+bnx2_cnic_stop(struct bnx2 *bp)
+{
+}
+
+static void
+bnx2_cnic_start(struct bnx2 *bp)
+{
+}
+
+#endif
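
bnx2_register_cnic()/bnx2_unregister_cnic() follow the classic RCU publish
pattern: rcu_assign_pointer() publishes a fully-initialized ops structure,
pollers fetch it with rcu_dereference() under rcu_read_lock(), and
unregister clears the pointer and waits in synchronize_rcu() before the ops
can go away. A rough userspace analog with C11 acquire/release atomics in
place of the RCU primitives (the grace-period wait is elided):

	#include <stdatomic.h>
	#include <stdio.h>

	struct ops { void (*handler)(void); };

	static void hello(void) { puts("cnic handler"); }
	static struct ops cnic_ops = { .handler = hello };

	static _Atomic(struct ops *) live_ops;  /* NULL until registered */

	static void do_register(void)
	{
		/* release-store: like rcu_assign_pointer(bp->cnic_ops, ops) */
		atomic_store_explicit(&live_ops, &cnic_ops,
				      memory_order_release);
	}

	static void poll_once(void)
	{
		/* acquire-load: like rcu_dereference(bp->cnic_ops) */
		struct ops *o = atomic_load_explicit(&live_ops,
						     memory_order_acquire);
		if (o)
			o->handler();
	}

	int main(void)
	{
		poll_once();            /* nothing: not yet registered */
		do_register();
		poll_once();            /* prints "cnic handler" */
		return 0;
	}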
+
 static int
 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
 {
@@ -488,6 +644,7 @@ bnx2_napi_enable(struct bnx2 *bp)
 static void
 bnx2_netif_stop(struct bnx2 *bp)
 {
+       bnx2_cnic_stop(bp);
        bnx2_disable_int_sync(bp);
        if (netif_running(bp->dev)) {
                bnx2_napi_disable(bp);
@@ -504,6 +661,7 @@ bnx2_netif_start(struct bnx2 *bp)
                        netif_tx_wake_all_queues(bp->dev);
                        bnx2_napi_enable(bp);
                        bnx2_enable_int(bp);
+                       bnx2_cnic_start(bp);
                }
        }
 }
@@ -3164,6 +3322,11 @@ bnx2_has_work(struct bnx2_napi *bnapi)
        if (bnx2_has_fast_work(bnapi))
                return 1;
 
+#ifdef BCM_CNIC
+       if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
+               return 1;
+#endif
+
        if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
            (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
                return 1;
@@ -3193,6 +3356,23 @@ bnx2_chk_missed_msi(struct bnx2 *bp)
        bp->idle_chk_status_idx = bnapi->last_status_idx;
 }
 
+#ifdef BCM_CNIC
+static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
+{
+       struct cnic_ops *c_ops;
+
+       if (!bnapi->cnic_present)
+               return;
+
+       rcu_read_lock();
+       c_ops = rcu_dereference(bp->cnic_ops);
+       if (c_ops)
+               bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
+                                                     bnapi->status_blk.msi);
+       rcu_read_unlock();
+}
+#endif
+
 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
 {
        struct status_block *sblk = bnapi->status_blk.msi;
@@ -3267,6 +3447,10 @@ static int bnx2_poll(struct napi_struct *napi, int budget)
 
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
 
+#ifdef BCM_CNIC
+               bnx2_poll_cnic(bp, bnapi);
+#endif
+
                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
@@ -4632,8 +4816,11 @@ bnx2_init_chip(struct bnx2 *bp)
        val = REG_RD(bp, BNX2_MQ_CONFIG);
        val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
        val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
-       if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
-               val |= BNX2_MQ_CONFIG_HALT_DIS;
+       if (CHIP_NUM(bp) == CHIP_NUM_5709) {
+               val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
+               if (CHIP_REV(bp) == CHIP_REV_Ax)
+                       val |= BNX2_MQ_CONFIG_HALT_DIS;
+       }
 
        REG_WR(bp, BNX2_MQ_CONFIG, val);
 
@@ -7471,7 +7658,7 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
        INIT_WORK(&bp->reset_task, bnx2_reset_task);
 
        dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
-       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
+       mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
        dev->mem_end = dev->mem_start + mem_len;
        dev->irq = pdev->irq;
 
index 5b570e17c839a2d35db6fba9f27da71a1efaf41e..a1ff739bc9b5e5709f6e3093bf516ace7d148515 100644 (file)
@@ -361,6 +361,9 @@ struct l2_fhdr {
 #define BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE       (1<<28)
 
 #define BNX2_L2CTX_HOST_BDIDX                          0x00000004
+#define BNX2_L2CTX_STATUSB_NUM_SHIFT                    16
+#define BNX2_L2CTX_STATUSB_NUM(sb_id)                   \
+       (((sb_id) > 0) ? (((sb_id) + 7) << BNX2_L2CTX_STATUSB_NUM_SHIFT) : 0)
 #define BNX2_L2CTX_HOST_BSEQ                           0x00000008
 #define BNX2_L2CTX_NX_BSEQ                             0x0000000c
 #define BNX2_L2CTX_NX_BDHADDR_HI                       0x00000010
@@ -5900,6 +5903,7 @@ struct l2_fhdr {
 #define BNX2_RXP_FTQ_CTL_CUR_DEPTH                      (0x3ffL<<22)
 
 #define BNX2_RXP_SCRATCH                               0x000e0000
+#define BNX2_RXP_SCRATCH_RXP_FLOOD                      0x000e0024
 #define BNX2_RXP_SCRATCH_RSS_TBL_SZ                     0x000e0038
 #define BNX2_RXP_SCRATCH_RSS_TBL                        0x000e003c
 #define BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES            128
@@ -6678,6 +6682,11 @@ struct bnx2_napi {
        u32                     last_status_idx;
        u32                     int_num;
 
+#ifdef BCM_CNIC
+       u32                     cnic_tag;
+       int                     cnic_present;
+#endif
+
        struct bnx2_rx_ring_info        rx_ring;
        struct bnx2_tx_ring_info        tx_ring;
 };
@@ -6727,6 +6736,11 @@ struct bnx2 {
        int             tx_ring_size;
        u32             tx_wake_thresh;
 
+#ifdef BCM_CNIC
+       struct cnic_ops         *cnic_ops;
+       void                    *cnic_data;
+#endif
+
        /* End of fields used in the performance code paths. */
 
        unsigned int            current_interval;
@@ -6885,6 +6899,10 @@ struct bnx2 {
 
        u32                     idle_chk_status_idx;
 
+#ifdef BCM_CNIC
+       struct cnic_eth_dev     cnic_eth_dev;
+#endif
+
        const struct firmware   *mips_firmware;
        const struct firmware   *rv2p_firmware;
 };
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
new file mode 100644 (file)
index 0000000..8d74037
--- /dev/null
@@ -0,0 +1,2711 @@
+/* cnic.c: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
+ * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/list.h>
+#include <linux/slab.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/uio_driver.h>
+#include <linux/in.h>
+#include <linux/dma-mapping.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
+#define BCM_VLAN 1
+#endif
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <scsi/iscsi_if.h>
+
+#include "cnic_if.h"
+#include "bnx2.h"
+#include "cnic.h"
+#include "cnic_defs.h"
+
+#define DRV_MODULE_NAME                "cnic"
+#define PFX DRV_MODULE_NAME    ": "
+
+static char version[] __devinitdata =
+       "Broadcom NetXtreme II CNIC Driver " DRV_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+
+MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
+             "Chen (zongxi@broadcom.com");
+MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(CNIC_MODULE_VERSION);
+
+static LIST_HEAD(cnic_dev_list);
+static DEFINE_RWLOCK(cnic_dev_lock);
+static DEFINE_MUTEX(cnic_lock);
+
+static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
+
+static int cnic_service_bnx2(void *, void *);
+static int cnic_ctl(void *, struct cnic_ctl_info *);
+
+static struct cnic_ops cnic_bnx2_ops = {
+       .cnic_owner     = THIS_MODULE,
+       .cnic_handler   = cnic_service_bnx2,
+       .cnic_ctl       = cnic_ctl,
+};
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *);
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *);
+static int cnic_cm_set_pg(struct cnic_sock *);
+
+static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
+{
+       struct cnic_dev *dev = uinfo->priv;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       if (!capable(CAP_NET_ADMIN))
+               return -EPERM;
+
+       if (cp->uio_dev != -1)
+               return -EBUSY;
+
+       cp->uio_dev = iminor(inode);
+
+       cnic_shutdown_bnx2_rx_ring(dev);
+
+       cnic_init_bnx2_tx_ring(dev);
+       cnic_init_bnx2_rx_ring(dev);
+
+       return 0;
+}
+
+static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
+{
+       struct cnic_dev *dev = uinfo->priv;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       cp->uio_dev = -1;
+       return 0;
+}
+
+static inline void cnic_hold(struct cnic_dev *dev)
+{
+       atomic_inc(&dev->ref_count);
+}
+
+static inline void cnic_put(struct cnic_dev *dev)
+{
+       atomic_dec(&dev->ref_count);
+}
+
+static inline void csk_hold(struct cnic_sock *csk)
+{
+       atomic_inc(&csk->ref_count);
+}
+
+static inline void csk_put(struct cnic_sock *csk)
+{
+       atomic_dec(&csk->ref_count);
+}
+
+static struct cnic_dev *cnic_from_netdev(struct net_device *netdev)
+{
+       struct cnic_dev *cdev;
+
+       read_lock(&cnic_dev_lock);
+       list_for_each_entry(cdev, &cnic_dev_list, list) {
+               if (netdev == cdev->netdev) {
+                       cnic_hold(cdev);
+                       read_unlock(&cnic_dev_lock);
+                       return cdev;
+               }
+       }
+       read_unlock(&cnic_dev_lock);
+       return NULL;
+}
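
cnic_from_netdev() shows the lookup idiom used throughout the new driver:
search under the list lock and take a reference before dropping it, so the
device cannot be freed between lookup and use. A sketch of the same idiom
with a pthread rwlock and a C11 atomic counter (the types here are
illustrative, not the driver's):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	struct dev {
		int id;
		atomic_int refcnt;
		struct dev *next;
	};

	static pthread_rwlock_t list_lock = PTHREAD_RWLOCK_INITIALIZER;
	static struct dev d1 = { .id = 7 };
	static struct dev *dev_list = &d1;

	static struct dev *dev_lookup(int id)
	{
		struct dev *d;

		pthread_rwlock_rdlock(&list_lock);
		for (d = dev_list; d; d = d->next) {
			if (d->id == id) {
				/* hold while still under the lock */
				atomic_fetch_add(&d->refcnt, 1);
				pthread_rwlock_unlock(&list_lock);
				return d;
			}
		}
		pthread_rwlock_unlock(&list_lock);
		return NULL;
	}

	int main(void)
	{
		struct dev *d = dev_lookup(7);

		printf("found=%d refcnt=%d\n", d != NULL,
		       d ? atomic_load(&d->refcnt) : 0);
		return 0;
	}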
+
+static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+       struct drv_ctl_io *io = &info.data.io;
+
+       info.cmd = DRV_CTL_CTX_WR_CMD;
+       io->cid_addr = cid_addr;
+       io->offset = off;
+       io->data = val;
+       ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+       struct drv_ctl_io *io = &info.data.io;
+
+       info.cmd = DRV_CTL_IO_WR_CMD;
+       io->offset = off;
+       io->data = val;
+       ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static u32 cnic_reg_rd_ind(struct cnic_dev *dev, u32 off)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+       struct drv_ctl_io *io = &info.data.io;
+
+       info.cmd = DRV_CTL_IO_RD_CMD;
+       io->offset = off;
+       ethdev->drv_ctl(dev->netdev, &info);
+       return io->data;
+}
+
+static int cnic_in_use(struct cnic_sock *csk)
+{
+       return test_bit(SK_F_INUSE, &csk->flags);
+}
+
+static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct drv_ctl_info info;
+
+       info.cmd = DRV_CTL_COMPLETION_CMD;
+       info.data.comp.comp_count = count;
+       ethdev->drv_ctl(dev->netdev, &info);
+}
+
+static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
+                          struct cnic_sock *csk)
+{
+       struct iscsi_path path_req;
+       char *buf = NULL;
+       u16 len = 0;
+       u32 msg_type = ISCSI_KEVENT_IF_DOWN;
+       struct cnic_ulp_ops *ulp_ops;
+
+       if (cp->uio_dev == -1)
+               return -ENODEV;
+
+       if (csk) {
+               len = sizeof(path_req);
+               buf = (char *) &path_req;
+               memset(&path_req, 0, len);
+
+               msg_type = ISCSI_KEVENT_PATH_REQ;
+               path_req.handle = (u64) csk->l5_cid;
+               if (test_bit(SK_F_IPV6, &csk->flags)) {
+                       memcpy(&path_req.dst.v6_addr, &csk->dst_ip[0],
+                              sizeof(struct in6_addr));
+                       path_req.ip_addr_len = 16;
+               } else {
+                       memcpy(&path_req.dst.v4_addr, &csk->dst_ip[0],
+                              sizeof(struct in_addr));
+                       path_req.ip_addr_len = 4;
+               }
+               path_req.vlan_id = csk->vlan_id;
+               path_req.pmtu = csk->mtu;
+       }
+
+       rcu_read_lock();
+       ulp_ops = rcu_dereference(cp->ulp_ops[CNIC_ULP_ISCSI]);
+       if (ulp_ops)
+               ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len);
+       rcu_read_unlock();
+       return 0;
+}
+
+static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
+                                 char *buf, u16 len)
+{
+       int rc = -EINVAL;
+
+       switch (msg_type) {
+       case ISCSI_UEVENT_PATH_UPDATE: {
+               struct cnic_local *cp;
+               u32 l5_cid;
+               struct cnic_sock *csk;
+               struct iscsi_path *path_resp;
+
+               if (len < sizeof(*path_resp))
+                       break;
+
+               path_resp = (struct iscsi_path *) buf;
+               cp = dev->cnic_priv;
+               l5_cid = (u32) path_resp->handle;
+               if (l5_cid >= MAX_CM_SK_TBL_SZ)
+                       break;
+
+               csk = &cp->csk_tbl[l5_cid];
+               csk_hold(csk);
+               if (cnic_in_use(csk)) {
+                       memcpy(csk->ha, path_resp->mac_addr, 6);
+                       if (test_bit(SK_F_IPV6, &csk->flags))
+                               memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
+                                      sizeof(struct in6_addr));
+                       else
+                               memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
+                                      sizeof(struct in_addr));
+                       if (is_valid_ether_addr(csk->ha))
+                               cnic_cm_set_pg(csk);
+               }
+               csk_put(csk);
+               rc = 0;
+       }
+       }
+
+       return rc;
+}
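
cnic_iscsi_nl_msg_recv() checks len against sizeof(*path_resp) before
touching the payload and range-checks the handle before indexing csk_tbl;
both guards matter because the buffer comes from the userspace iSCSI daemon.
The same validate-before-cast shape in miniature (the struct layout is made
up for the example):

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	#define MAX_TBL 64

	struct path_msg {               /* stand-in for struct iscsi_path */
		uint64_t handle;
		uint8_t  mac[6];
	};

	static int handle_msg(const char *buf, size_t len)
	{
		struct path_msg m;

		if (len < sizeof(m))    /* short message: reject */
			return -1;
		memcpy(&m, buf, sizeof(m));
		if (m.handle >= MAX_TBL) /* untrusted index: reject */
			return -1;
		printf("update slot %llu\n", (unsigned long long)m.handle);
		return 0;
	}

	int main(void)
	{
		char bad[4] = {0};
		char good[sizeof(struct path_msg)] = {0};

		printf("%d\n", handle_msg(bad, sizeof(bad)));   /* -1 */
		printf("%d\n", handle_msg(good, sizeof(good))); /* 0 */
		return 0;
	}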
+
+static int cnic_offld_prep(struct cnic_sock *csk)
+{
+       if (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+               return 0;
+
+       if (!test_bit(SK_F_CONNECT_START, &csk->flags)) {
+               clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+               return 0;
+       }
+
+       return 1;
+}
+
+static int cnic_close_prep(struct cnic_sock *csk)
+{
+       clear_bit(SK_F_CONNECT_START, &csk->flags);
+       smp_mb__after_clear_bit();
+
+       if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+               while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+                       msleep(1);
+
+               return 1;
+       }
+       return 0;
+}
+
+static int cnic_abort_prep(struct cnic_sock *csk)
+{
+       clear_bit(SK_F_CONNECT_START, &csk->flags);
+       smp_mb__after_clear_bit();
+
+       while (test_and_set_bit(SK_F_OFFLD_SCHED, &csk->flags))
+               msleep(1);
+
+       if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
+               csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+               return 1;
+       }
+
+       return 0;
+}
+
+int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
+{
+       struct cnic_dev *dev;
+
+       if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+               printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
+                      ulp_type);
+               return -EINVAL;
+       }
+       mutex_lock(&cnic_lock);
+       if (cnic_ulp_tbl[ulp_type]) {
+               printk(KERN_ERR PFX "cnic_register_driver: Type %d has already "
+                                   "been registered\n", ulp_type);
+               mutex_unlock(&cnic_lock);
+               return -EBUSY;
+       }
+
+       read_lock(&cnic_dev_lock);
+       list_for_each_entry(dev, &cnic_dev_list, list) {
+               struct cnic_local *cp = dev->cnic_priv;
+
+               clear_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]);
+       }
+       read_unlock(&cnic_dev_lock);
+
+       rcu_assign_pointer(cnic_ulp_tbl[ulp_type], ulp_ops);
+       mutex_unlock(&cnic_lock);
+
+       /* Prevent race conditions with netdev_event */
+       rtnl_lock();
+       read_lock(&cnic_dev_lock);
+       list_for_each_entry(dev, &cnic_dev_list, list) {
+               struct cnic_local *cp = dev->cnic_priv;
+
+               if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
+                       ulp_ops->cnic_init(dev);
+       }
+       read_unlock(&cnic_dev_lock);
+       rtnl_unlock();
+
+       return 0;
+}
+
+int cnic_unregister_driver(int ulp_type)
+{
+       struct cnic_dev *dev;
+
+       if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+               printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
+                      ulp_type);
+               return -EINVAL;
+       }
+       mutex_lock(&cnic_lock);
+       if (!cnic_ulp_tbl[ulp_type]) {
+               printk(KERN_ERR PFX "cnic_unregister_driver: Type %d has not "
+                                   "been registered\n", ulp_type);
+               goto out_unlock;
+       }
+       read_lock(&cnic_dev_lock);
+       list_for_each_entry(dev, &cnic_dev_list, list) {
+               struct cnic_local *cp = dev->cnic_priv;
+
+               if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+                       printk(KERN_ERR PFX "cnic_unregister_driver: Type %d "
+                              "still has devices registered\n", ulp_type);
+                       read_unlock(&cnic_dev_lock);
+                       goto out_unlock;
+               }
+       }
+       read_unlock(&cnic_dev_lock);
+
+       rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
+
+       mutex_unlock(&cnic_lock);
+       synchronize_rcu();
+       return 0;
+
+out_unlock:
+       mutex_unlock(&cnic_lock);
+       return -EINVAL;
+}
+
+static int cnic_start_hw(struct cnic_dev *);
+static void cnic_stop_hw(struct cnic_dev *);
+
+static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
+                               void *ulp_ctx)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_ulp_ops *ulp_ops;
+
+       if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+               printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
+                      ulp_type);
+               return -EINVAL;
+       }
+       mutex_lock(&cnic_lock);
+       if (cnic_ulp_tbl[ulp_type] == NULL) {
+               printk(KERN_ERR PFX "cnic_register_device: Driver with type %d "
+                                   "has not been registered\n", ulp_type);
+               mutex_unlock(&cnic_lock);
+               return -EAGAIN;
+       }
+       if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+               printk(KERN_ERR PFX "cnic_register_device: Type %d has already "
+                      "been registered to this device\n", ulp_type);
+               mutex_unlock(&cnic_lock);
+               return -EBUSY;
+       }
+
+       clear_bit(ULP_F_START, &cp->ulp_flags[ulp_type]);
+       cp->ulp_handle[ulp_type] = ulp_ctx;
+       ulp_ops = cnic_ulp_tbl[ulp_type];
+       rcu_assign_pointer(cp->ulp_ops[ulp_type], ulp_ops);
+       cnic_hold(dev);
+
+       if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+               if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[ulp_type]))
+                       ulp_ops->cnic_start(cp->ulp_handle[ulp_type]);
+
+       mutex_unlock(&cnic_lock);
+
+       return 0;
+
+}
+EXPORT_SYMBOL(cnic_register_driver);
+
+static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       if (ulp_type >= MAX_CNIC_ULP_TYPE) {
+               printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
+                      ulp_type);
+               return -EINVAL;
+       }
+       mutex_lock(&cnic_lock);
+       if (rcu_dereference(cp->ulp_ops[ulp_type])) {
+               rcu_assign_pointer(cp->ulp_ops[ulp_type], NULL);
+               cnic_put(dev);
+       } else {
+               printk(KERN_ERR PFX "cnic_unregister_device: device not "
+                      "registered to this ulp type %d\n", ulp_type);
+               mutex_unlock(&cnic_lock);
+               return -EINVAL;
+       }
+       mutex_unlock(&cnic_lock);
+
+       synchronize_rcu();
+
+       return 0;
+}
+EXPORT_SYMBOL(cnic_unregister_driver);
+
+static int cnic_init_id_tbl(struct cnic_id_tbl *id_tbl, u32 size, u32 start_id)
+{
+       id_tbl->start = start_id;
+       id_tbl->max = size;
+       id_tbl->next = 0;
+       spin_lock_init(&id_tbl->lock);
+       id_tbl->table = kzalloc(DIV_ROUND_UP(size, 32) * 4, GFP_KERNEL);
+       if (!id_tbl->table)
+               return -ENOMEM;
+
+       return 0;
+}
+
+static void cnic_free_id_tbl(struct cnic_id_tbl *id_tbl)
+{
+       kfree(id_tbl->table);
+       id_tbl->table = NULL;
+}
+
+static int cnic_alloc_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+       int ret = -1;
+
+       id -= id_tbl->start;
+       if (id >= id_tbl->max)
+               return ret;
+
+       spin_lock(&id_tbl->lock);
+       if (!test_bit(id, id_tbl->table)) {
+               set_bit(id, id_tbl->table);
+               ret = 0;
+       }
+       spin_unlock(&id_tbl->lock);
+       return ret;
+}
+
+/* Returns -1 if not successful */
+static u32 cnic_alloc_new_id(struct cnic_id_tbl *id_tbl)
+{
+       u32 id;
+
+       spin_lock(&id_tbl->lock);
+       id = find_next_zero_bit(id_tbl->table, id_tbl->max, id_tbl->next);
+       if (id >= id_tbl->max) {
+               id = -1;
+               if (id_tbl->next != 0) {
+                       id = find_first_zero_bit(id_tbl->table, id_tbl->next);
+                       if (id >= id_tbl->next)
+                               id = -1;
+               }
+       }
+
+       if (id < id_tbl->max) {
+               set_bit(id, id_tbl->table);
+               id_tbl->next = (id + 1) & (id_tbl->max - 1);
+               id += id_tbl->start;
+       }
+
+       spin_unlock(&id_tbl->lock);
+
+       return id;
+}
+
+static void cnic_free_id(struct cnic_id_tbl *id_tbl, u32 id)
+{
+       if (id == -1)
+               return;
+
+       id -= id_tbl->start;
+       if (id >= id_tbl->max)
+               return;
+
+       clear_bit(id, id_tbl->table);
+}
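
cnic_alloc_new_id() hands out IDs round-robin from a bitmap, resuming the
scan at ->next so recently freed IDs are not reused immediately; the wrap
with "& (max - 1)" assumes a power-of-two table size. A standalone sketch of
the same allocator (byte flags instead of a bitmap, sizes made up):

	#include <stdio.h>

	#define MAX_IDS 8               /* must stay a power of two */

	static unsigned char used[MAX_IDS];
	static unsigned int next;

	static int alloc_id(void)
	{
		unsigned int i, id;

		for (i = 0; i < MAX_IDS; i++) {
			id = (next + i) & (MAX_IDS - 1);
			if (!used[id]) {
				used[id] = 1;
				next = (id + 1) & (MAX_IDS - 1);
				return (int)id;
			}
		}
		return -1;              /* table full */
	}

	static void free_id(int id)
	{
		if (id >= 0 && id < MAX_IDS)
			used[id] = 0;
	}

	int main(void)
	{
		int a = alloc_id(), b = alloc_id();

		printf("a=%d b=%d\n", a, b);    /* a=0 b=1 */
		free_id(a);
		printf("c=%d\n", alloc_id());   /* c=2, not the freed 0 */
		return 0;
	}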
+
+static void cnic_free_dma(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+       int i;
+
+       if (!dma->pg_arr)
+               return;
+
+       for (i = 0; i < dma->num_pages; i++) {
+               if (dma->pg_arr[i]) {
+                       pci_free_consistent(dev->pcidev, BCM_PAGE_SIZE,
+                                           dma->pg_arr[i], dma->pg_map_arr[i]);
+                       dma->pg_arr[i] = NULL;
+               }
+       }
+       if (dma->pgtbl) {
+               pci_free_consistent(dev->pcidev, dma->pgtbl_size,
+                                   dma->pgtbl, dma->pgtbl_map);
+               dma->pgtbl = NULL;
+       }
+       kfree(dma->pg_arr);
+       dma->pg_arr = NULL;
+       dma->num_pages = 0;
+}
+
+static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
+{
+       int i;
+       u32 *page_table = dma->pgtbl;
+
+       for (i = 0; i < dma->num_pages; i++) {
+               /* Each entry needs to be in big endian format. */
+               *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
+               page_table++;
+               *page_table = (u32) dma->pg_map_arr[i];
+               page_table++;
+       }
+}
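
cnic_setup_page_tbl() stores each 64-bit DMA page address as two 32-bit
words, high word first, because the chip reads the table in big-endian word
order. The split in isolation:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dma = 0x0000001234abc000ULL;   /* sample address */
		uint32_t hi = (uint32_t)(dma >> 32);
		uint32_t lo = (uint32_t)dma;

		printf("entry = %08x %08x\n", hi, lo);  /* 00000012 34abc000 */
		return 0;
	}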
+
+static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
+                         int pages, int use_pg_tbl)
+{
+       int i, size;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       size = pages * (sizeof(void *) + sizeof(dma_addr_t));
+       dma->pg_arr = kzalloc(size, GFP_ATOMIC);
+       if (dma->pg_arr == NULL)
+               return -ENOMEM;
+
+       dma->pg_map_arr = (dma_addr_t *) (dma->pg_arr + pages);
+       dma->num_pages = pages;
+
+       for (i = 0; i < pages; i++) {
+               dma->pg_arr[i] = pci_alloc_consistent(dev->pcidev,
+                                                     BCM_PAGE_SIZE,
+                                                     &dma->pg_map_arr[i]);
+               if (dma->pg_arr[i] == NULL)
+                       goto error;
+       }
+       if (!use_pg_tbl)
+               return 0;
+
+       dma->pgtbl_size = ((pages * 8) + BCM_PAGE_SIZE - 1) &
+                         ~(BCM_PAGE_SIZE - 1);
+       dma->pgtbl = pci_alloc_consistent(dev->pcidev, dma->pgtbl_size,
+                                         &dma->pgtbl_map);
+       if (dma->pgtbl == NULL)
+               goto error;
+
+       cp->setup_pgtbl(dev, dma);
+
+       return 0;
+
+error:
+       cnic_free_dma(dev, dma);
+       return -ENOMEM;
+}
+
+static void cnic_free_resc(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int i = 0;
+
+       if (cp->cnic_uinfo) {
+               cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
+               while (cp->uio_dev != -1 && i < 15) {
+                       msleep(100);
+                       i++;
+               }
+               uio_unregister_device(cp->cnic_uinfo);
+               kfree(cp->cnic_uinfo);
+               cp->cnic_uinfo = NULL;
+       }
+
+       if (cp->l2_buf) {
+               pci_free_consistent(dev->pcidev, cp->l2_buf_size,
+                                   cp->l2_buf, cp->l2_buf_map);
+               cp->l2_buf = NULL;
+       }
+
+       if (cp->l2_ring) {
+               pci_free_consistent(dev->pcidev, cp->l2_ring_size,
+                                   cp->l2_ring, cp->l2_ring_map);
+               cp->l2_ring = NULL;
+       }
+
+       for (i = 0; i < cp->ctx_blks; i++) {
+               if (cp->ctx_arr[i].ctx) {
+                       pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
+                                           cp->ctx_arr[i].ctx,
+                                           cp->ctx_arr[i].mapping);
+                       cp->ctx_arr[i].ctx = NULL;
+               }
+       }
+       kfree(cp->ctx_arr);
+       cp->ctx_arr = NULL;
+       cp->ctx_blks = 0;
+
+       cnic_free_dma(dev, &cp->gbl_buf_info);
+       cnic_free_dma(dev, &cp->conn_buf_info);
+       cnic_free_dma(dev, &cp->kwq_info);
+       cnic_free_dma(dev, &cp->kcq_info);
+       kfree(cp->iscsi_tbl);
+       cp->iscsi_tbl = NULL;
+       kfree(cp->ctx_tbl);
+       cp->ctx_tbl = NULL;
+
+       cnic_free_id_tbl(&cp->cid_tbl);
+}
+
+static int cnic_alloc_context(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+               int i, k, arr_size;
+
+               cp->ctx_blk_size = BCM_PAGE_SIZE;
+               cp->cids_per_blk = BCM_PAGE_SIZE / 128;
+               arr_size = BNX2_MAX_CID / cp->cids_per_blk *
+                          sizeof(struct cnic_ctx);
+               cp->ctx_arr = kzalloc(arr_size, GFP_KERNEL);
+               if (cp->ctx_arr == NULL)
+                       return -ENOMEM;
+
+               k = 0;
+               for (i = 0; i < 2; i++) {
+                       u32 j, reg, off, lo, hi;
+
+                       if (i == 0)
+                               off = BNX2_PG_CTX_MAP;
+                       else
+                               off = BNX2_ISCSI_CTX_MAP;
+
+                       reg = cnic_reg_rd_ind(dev, off);
+                       lo = reg >> 16;
+                       hi = reg & 0xffff;
+                       for (j = lo; j < hi; j += cp->cids_per_blk, k++)
+                               cp->ctx_arr[k].cid = j;
+               }
+
+               cp->ctx_blks = k;
+               if (cp->ctx_blks >= (BNX2_MAX_CID / cp->cids_per_blk)) {
+                       cp->ctx_blks = 0;
+                       return -ENOMEM;
+               }
+
+               for (i = 0; i < cp->ctx_blks; i++) {
+                       cp->ctx_arr[i].ctx =
+                               pci_alloc_consistent(dev->pcidev, BCM_PAGE_SIZE,
+                                                    &cp->ctx_arr[i].mapping);
+                       if (cp->ctx_arr[i].ctx == NULL)
+                               return -ENOMEM;
+               }
+       }
+       return 0;
+}
+
+static int cnic_alloc_bnx2_resc(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct uio_info *uinfo;
+       int ret;
+
+       ret = cnic_alloc_dma(dev, &cp->kwq_info, KWQ_PAGE_CNT, 1);
+       if (ret)
+               goto error;
+       cp->kwq = (struct kwqe **) cp->kwq_info.pg_arr;
+
+       ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 1);
+       if (ret)
+               goto error;
+       cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
+
+       ret = cnic_alloc_context(dev);
+       if (ret)
+               goto error;
+
+       cp->l2_ring_size = 2 * BCM_PAGE_SIZE;
+       cp->l2_ring = pci_alloc_consistent(dev->pcidev, cp->l2_ring_size,
+                                          &cp->l2_ring_map);
+       if (!cp->l2_ring)
+               goto error;
+
+       cp->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
+       cp->l2_buf_size = PAGE_ALIGN(cp->l2_buf_size);
+       cp->l2_buf = pci_alloc_consistent(dev->pcidev, cp->l2_buf_size,
+                                          &cp->l2_buf_map);
+       if (!cp->l2_buf)
+               goto error;
+
+       uinfo = kzalloc(sizeof(*uinfo), GFP_ATOMIC);
+       if (!uinfo)
+               goto error;
+
+       uinfo->mem[0].addr = dev->netdev->base_addr;
+       uinfo->mem[0].internal_addr = dev->regview;
+       uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
+       uinfo->mem[0].memtype = UIO_MEM_PHYS;
+
+       uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
+       if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
+               uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
+       else
+               uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
+       uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->mem[2].addr = (unsigned long) cp->l2_ring;
+       uinfo->mem[2].size = cp->l2_ring_size;
+       uinfo->mem[2].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->mem[3].addr = (unsigned long) cp->l2_buf;
+       uinfo->mem[3].size = cp->l2_buf_size;
+       uinfo->mem[3].memtype = UIO_MEM_LOGICAL;
+
+       uinfo->name = "bnx2_cnic";
+       uinfo->version = CNIC_MODULE_VERSION;
+       uinfo->irq = UIO_IRQ_CUSTOM;
+
+       uinfo->open = cnic_uio_open;
+       uinfo->release = cnic_uio_close;
+
+       uinfo->priv = dev;
+
+       ret = uio_register_device(&dev->pcidev->dev, uinfo);
+       if (ret) {
+               kfree(uinfo);
+               goto error;
+       }
+
+       cp->cnic_uinfo = uinfo;
+
+       return 0;
+
+error:
+       cnic_free_resc(dev);
+       return ret;
+}
+
+static inline u32 cnic_kwq_avail(struct cnic_local *cp)
+{
+       return cp->max_kwq_idx -
+               ((cp->kwq_prod_idx - cp->kwq_con_idx) & cp->max_kwq_idx);
+}
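
cnic_kwq_avail() is the usual power-of-two ring arithmetic: (prod - cons) &
mask yields the in-flight count even after the u16 indices wrap, and free
space is the mask minus that. Standalone, with an assumed 128-entry mask
rather than the driver's real value:

	#include <stdint.h>
	#include <stdio.h>

	#define MASK 0x7f               /* assumed: 128-slot ring minus 1 */

	static uint32_t kwq_avail(uint16_t prod, uint16_t cons)
	{
		return MASK - ((uint16_t)(prod - cons) & MASK);
	}

	int main(void)
	{
		printf("%u\n", kwq_avail(10, 10));      /* empty: 127 */
		printf("%u\n", kwq_avail(5, 0xfffe));   /* wrapped: 120 */
		return 0;
	}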
+
+static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
+                                 u32 num_wqes)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct kwqe *prod_qe;
+       u16 prod, sw_prod, i;
+
+       if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
+               return -EAGAIN;         /* bnx2 is down */
+
+       spin_lock_bh(&cp->cnic_ulp_lock);
+       if (num_wqes > cnic_kwq_avail(cp) &&
+           !(cp->cnic_local_flags & CNIC_LCL_FL_KWQ_INIT)) {
+               spin_unlock_bh(&cp->cnic_ulp_lock);
+               return -EAGAIN;
+       }
+
+       cp->cnic_local_flags &= ~CNIC_LCL_FL_KWQ_INIT;
+
+       prod = cp->kwq_prod_idx;
+       sw_prod = prod & MAX_KWQ_IDX;
+       for (i = 0; i < num_wqes; i++) {
+               prod_qe = &cp->kwq[KWQ_PG(sw_prod)][KWQ_IDX(sw_prod)];
+               memcpy(prod_qe, wqes[i], sizeof(struct kwqe));
+               prod++;
+               sw_prod = prod & MAX_KWQ_IDX;
+       }
+       cp->kwq_prod_idx = prod;
+
+       CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
+
+       spin_unlock_bh(&cp->cnic_ulp_lock);
+       return 0;
+}
+
+static void service_kcqes(struct cnic_dev *dev, int num_cqes)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int i, j;
+
+       i = 0;
+       j = 1;
+       while (num_cqes) {
+               struct cnic_ulp_ops *ulp_ops;
+               int ulp_type;
+               u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
+               u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK;
+
+               if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
+                       cnic_kwq_completion(dev, 1);
+
+               while (j < num_cqes) {
+                       u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
+
+                       if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer)
+                               break;
+
+                       if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
+                               cnic_kwq_completion(dev, 1);
+                       j++;
+               }
+
+               if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_RDMA)
+                       ulp_type = CNIC_ULP_RDMA;
+               else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
+                       ulp_type = CNIC_ULP_ISCSI;
+               else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
+                       ulp_type = CNIC_ULP_L4;
+               else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
+                       goto end;
+               else {
+                       printk(KERN_ERR PFX "%s: Unknown type of KCQE (0x%x)\n",
+                              dev->netdev->name, kcqe_op_flag);
+                       goto end;
+               }
+
+               rcu_read_lock();
+               ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+               if (likely(ulp_ops)) {
+                       ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
+                                                 cp->completed_kcq + i, j);
+               }
+               rcu_read_unlock();
+end:
+               num_cqes -= j;
+               i += j;
+               j = 1;
+       }
+}
+
+static u16 cnic_bnx2_next_idx(u16 idx)
+{
+       return idx + 1;
+}
+
+static u16 cnic_bnx2_hw_idx(u16 idx)
+{
+       return idx;
+}
+
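+/* Collect completed KCQEs from the ring into cp->completed_kcq[], up to
+ * the hardware producer index or MAX_COMPLETED_KCQE entries.  Entries
+ * flagged KCQE_FLAGS_NEXT are kept with their successors, so *sw_prod
+ * is advanced only past complete groups and the usable count returned.
+ */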
+static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       u16 i, ri, last;
+       struct kcqe *kcqe;
+       int kcqe_cnt = 0, last_cnt = 0;
+
+       i = ri = last = *sw_prod;
+       ri &= MAX_KCQ_IDX;
+
+       while ((i != hw_prod) && (kcqe_cnt < MAX_COMPLETED_KCQE)) {
+               kcqe = &cp->kcq[KCQ_PG(ri)][KCQ_IDX(ri)];
+               cp->completed_kcq[kcqe_cnt++] = kcqe;
+               i = cp->next_idx(i);
+               ri = i & MAX_KCQ_IDX;
+               if (likely(!(kcqe->kcqe_op_flag & KCQE_FLAGS_NEXT))) {
+                       last_cnt = kcqe_cnt;
+                       last = i;
+               }
+       }
+
+       *sw_prod = last;
+       return last_cnt;
+}
+
+static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
+{
+       u16 rx_cons = *cp->rx_cons_ptr;
+       u16 tx_cons = *cp->tx_cons_ptr;
+
+       if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
+               cp->tx_cons = tx_cons;
+               cp->rx_cons = rx_cons;
+               uio_event_notify(cp->cnic_uinfo);
+       }
+}
+
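+/* Status-block handler called back from the bnx2 driver.  Drains the
+ * KCQ, dispatches the completions, writes the new consumer index back
+ * to the chip, and checks the L2 rings for userspace notification.
+ */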
+static int cnic_service_bnx2(void *data, void *status_blk)
+{
+       struct cnic_dev *dev = data;
+       struct status_block *sblk = status_blk;
+       struct cnic_local *cp = dev->cnic_priv;
+       u32 status_idx = sblk->status_idx;
+       u16 hw_prod, sw_prod;
+       int kcqe_cnt;
+
+       if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+               return status_idx;
+
+       cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+
+       hw_prod = sblk->status_completion_producer_index;
+       sw_prod = cp->kcq_prod_idx;
+       while (sw_prod != hw_prod) {
+               kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+               if (kcqe_cnt == 0)
+                       goto done;
+
+               service_kcqes(dev, kcqe_cnt);
+
+               /* Tell compiler that status_blk fields can change. */
+               barrier();
+               if (status_idx != sblk->status_idx) {
+                       status_idx = sblk->status_idx;
+                       cp->kwq_con_idx = *cp->kwq_con_idx_ptr;
+                       hw_prod = sblk->status_completion_producer_index;
+               } else
+                       break;
+       }
+
+done:
+       CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+
+       cp->kcq_prod_idx = sw_prod;
+
+       cnic_chk_bnx2_pkt_rings(cp);
+       return status_idx;
+}
+
+static void cnic_service_bnx2_msix(unsigned long data)
+{
+       struct cnic_dev *dev = (struct cnic_dev *) data;
+       struct cnic_local *cp = dev->cnic_priv;
+       struct status_block_msix *status_blk = cp->bnx2_status_blk;
+       u32 status_idx = status_blk->status_idx;
+       u16 hw_prod, sw_prod;
+       int kcqe_cnt;
+
+       cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+
+       hw_prod = status_blk->status_completion_producer_index;
+       sw_prod = cp->kcq_prod_idx;
+       while (sw_prod != hw_prod) {
+               kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
+               if (kcqe_cnt == 0)
+                       goto done;
+
+               service_kcqes(dev, kcqe_cnt);
+
+               /* Tell compiler that status_blk fields can change. */
+               barrier();
+               if (status_idx != status_blk->status_idx) {
+                       status_idx = status_blk->status_idx;
+                       cp->kwq_con_idx = status_blk->status_cmd_consumer_index;
+                       hw_prod = status_blk->status_completion_producer_index;
+               } else
+                       break;
+       }
+
+done:
+       CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
+       cp->kcq_prod_idx = sw_prod;
+
+       cnic_chk_bnx2_pkt_rings(cp);
+
+       cp->last_status_idx = status_idx;
+       CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static irqreturn_t cnic_irq(int irq, void *dev_instance)
+{
+       struct cnic_dev *dev = dev_instance;
+       struct cnic_local *cp = dev->cnic_priv;
+       u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;
+
+       if (cp->ack_int)
+               cp->ack_int(dev);
+
+       prefetch(cp->status_blk);
+       prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
+
+       if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
+               tasklet_schedule(&cp->cnic_irq_task);
+
+       return IRQ_HANDLED;
+}
+
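+/* Call cnic_stop() on every registered ULP that has been started,
+ * clearing ULP_F_START so each one is stopped at most once.
+ */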
+static void cnic_ulp_stop(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int if_type;
+
+       rcu_read_lock();
+       for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+               struct cnic_ulp_ops *ulp_ops;
+
+               ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+               if (!ulp_ops)
+                       continue;
+
+               if (test_and_clear_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+                       ulp_ops->cnic_stop(cp->ulp_handle[if_type]);
+       }
+       rcu_read_unlock();
+}
+
+static void cnic_ulp_start(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int if_type;
+
+       rcu_read_lock();
+       for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+               struct cnic_ulp_ops *ulp_ops;
+
+               ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+               if (!ulp_ops || !ulp_ops->cnic_start)
+                       continue;
+
+               if (!test_and_set_bit(ULP_F_START, &cp->ulp_flags[if_type]))
+                       ulp_ops->cnic_start(cp->ulp_handle[if_type]);
+       }
+       rcu_read_unlock();
+}
+
+static int cnic_ctl(void *data, struct cnic_ctl_info *info)
+{
+       struct cnic_dev *dev = data;
+
+       switch (info->cmd) {
+       case CNIC_CTL_STOP_CMD:
+               cnic_hold(dev);
+               mutex_lock(&cnic_lock);
+
+               cnic_ulp_stop(dev);
+               cnic_stop_hw(dev);
+
+               mutex_unlock(&cnic_lock);
+               cnic_put(dev);
+               break;
+       case CNIC_CTL_START_CMD:
+               cnic_hold(dev);
+               mutex_lock(&cnic_lock);
+
+               if (!cnic_start_hw(dev))
+                       cnic_ulp_start(dev);
+
+               mutex_unlock(&cnic_lock);
+               cnic_put(dev);
+               break;
+       default:
+               return -EINVAL;
+       }
+       return 0;
+}
+
+static void cnic_ulp_init(struct cnic_dev *dev)
+{
+       int i;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       rcu_read_lock();
+       for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+               struct cnic_ulp_ops *ulp_ops;
+
+               ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+               if (!ulp_ops || !ulp_ops->cnic_init)
+                       continue;
+
+               if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+                       ulp_ops->cnic_init(dev);
+
+       }
+       rcu_read_unlock();
+}
+
+static void cnic_ulp_exit(struct cnic_dev *dev)
+{
+       int i;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       rcu_read_lock();
+       for (i = 0; i < MAX_CNIC_ULP_TYPE_EXT; i++) {
+               struct cnic_ulp_ops *ulp_ops;
+
+               ulp_ops = rcu_dereference(cnic_ulp_tbl[i]);
+               if (!ulp_ops || !ulp_ops->cnic_exit)
+                       continue;
+
+               if (test_and_clear_bit(ULP_F_INIT, &cp->ulp_flags[i]))
+                       ulp_ops->cnic_exit(dev);
+
+       }
+       rcu_read_unlock();
+}
+
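+/* Build and submit an OFFLOAD_PG work request carrying the L2 header
+ * template for the connection: destination and source MAC addresses
+ * and, when csk->vlan_id is set, a VLAN tag that grows the header by
+ * 4 bytes.
+ */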
+static int cnic_cm_offload_pg(struct cnic_sock *csk)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct l4_kwq_offload_pg *l4kwqe;
+       struct kwqe *wqes[1];
+
+       l4kwqe = (struct l4_kwq_offload_pg *) &csk->kwqe1;
+       memset(l4kwqe, 0, sizeof(*l4kwqe));
+       wqes[0] = (struct kwqe *) l4kwqe;
+
+       l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_OFFLOAD_PG;
+       l4kwqe->flags =
+               L4_LAYER_CODE << L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT;
+       l4kwqe->l2hdr_nbytes = ETH_HLEN;
+
+       l4kwqe->da0 = csk->ha[0];
+       l4kwqe->da1 = csk->ha[1];
+       l4kwqe->da2 = csk->ha[2];
+       l4kwqe->da3 = csk->ha[3];
+       l4kwqe->da4 = csk->ha[4];
+       l4kwqe->da5 = csk->ha[5];
+
+       l4kwqe->sa0 = dev->mac_addr[0];
+       l4kwqe->sa1 = dev->mac_addr[1];
+       l4kwqe->sa2 = dev->mac_addr[2];
+       l4kwqe->sa3 = dev->mac_addr[3];
+       l4kwqe->sa4 = dev->mac_addr[4];
+       l4kwqe->sa5 = dev->mac_addr[5];
+
+       l4kwqe->etype = ETH_P_IP;
+       l4kwqe->ipid_count = DEF_IPID_COUNT;
+       l4kwqe->host_opaque = csk->l5_cid;
+
+       if (csk->vlan_id) {
+               l4kwqe->pg_flags |= L4_KWQ_OFFLOAD_PG_VLAN_TAGGING;
+               l4kwqe->vlan_tag = csk->vlan_id;
+               l4kwqe->l2hdr_nbytes += 4;
+       }
+
+       return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_update_pg(struct cnic_sock *csk)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct l4_kwq_update_pg *l4kwqe;
+       struct kwqe *wqes[1];
+
+       l4kwqe = (struct l4_kwq_update_pg *) &csk->kwqe1;
+       memset(l4kwqe, 0, sizeof(*l4kwqe));
+       wqes[0] = (struct kwqe *) l4kwqe;
+
+       l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPDATE_PG;
+       l4kwqe->flags =
+               L4_LAYER_CODE << L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT;
+       l4kwqe->pg_cid = csk->pg_cid;
+
+       l4kwqe->da0 = csk->ha[0];
+       l4kwqe->da1 = csk->ha[1];
+       l4kwqe->da2 = csk->ha[2];
+       l4kwqe->da3 = csk->ha[3];
+       l4kwqe->da4 = csk->ha[4];
+       l4kwqe->da5 = csk->ha[5];
+
+       l4kwqe->pg_host_opaque = csk->l5_cid;
+       l4kwqe->pg_valids = L4_KWQ_UPDATE_PG_VALIDS_DA;
+
+       return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_upload_pg(struct cnic_sock *csk)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct l4_kwq_upload *l4kwqe;
+       struct kwqe *wqes[1];
+
+       l4kwqe = (struct l4_kwq_upload *) &csk->kwqe1;
+       memset(l4kwqe, 0, sizeof(*l4kwqe));
+       wqes[0] = (struct kwqe *) l4kwqe;
+
+       l4kwqe->opcode = L4_KWQE_OPCODE_VALUE_UPLOAD_PG;
+       l4kwqe->flags =
+               L4_LAYER_CODE << L4_KWQ_UPLOAD_LAYER_CODE_SHIFT;
+       l4kwqe->cid = csk->pg_cid;
+
+       return dev->submit_kwqes(dev, wqes, 1);
+}
+
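+/* Submit the TCP connect request as a chain of KWQEs: CONNECT1 always,
+ * CONNECT2 only for the additional IPv6 address words, and CONNECT3
+ * with the keepalive, TOS/TTL, buffer-size, and MSS parameters.
+ */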
+static int cnic_cm_conn_req(struct cnic_sock *csk)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct l4_kwq_connect_req1 *l4kwqe1;
+       struct l4_kwq_connect_req2 *l4kwqe2;
+       struct l4_kwq_connect_req3 *l4kwqe3;
+       struct kwqe *wqes[3];
+       u8 tcp_flags = 0;
+       int num_wqes = 2;
+
+       l4kwqe1 = (struct l4_kwq_connect_req1 *) &csk->kwqe1;
+       l4kwqe2 = (struct l4_kwq_connect_req2 *) &csk->kwqe2;
+       l4kwqe3 = (struct l4_kwq_connect_req3 *) &csk->kwqe3;
+       memset(l4kwqe1, 0, sizeof(*l4kwqe1));
+       memset(l4kwqe2, 0, sizeof(*l4kwqe2));
+       memset(l4kwqe3, 0, sizeof(*l4kwqe3));
+
+       l4kwqe3->op_code = L4_KWQE_OPCODE_VALUE_CONNECT3;
+       l4kwqe3->flags =
+               L4_LAYER_CODE << L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT;
+       l4kwqe3->ka_timeout = csk->ka_timeout;
+       l4kwqe3->ka_interval = csk->ka_interval;
+       l4kwqe3->ka_max_probe_count = csk->ka_max_probe_count;
+       l4kwqe3->tos = csk->tos;
+       l4kwqe3->ttl = csk->ttl;
+       l4kwqe3->snd_seq_scale = csk->snd_seq_scale;
+       l4kwqe3->pmtu = csk->mtu;
+       l4kwqe3->rcv_buf = csk->rcv_buf;
+       l4kwqe3->snd_buf = csk->snd_buf;
+       l4kwqe3->seed = csk->seed;
+
+       wqes[0] = (struct kwqe *) l4kwqe1;
+       if (test_bit(SK_F_IPV6, &csk->flags)) {
+               wqes[1] = (struct kwqe *) l4kwqe2;
+               wqes[2] = (struct kwqe *) l4kwqe3;
+               num_wqes = 3;
+
+               l4kwqe1->conn_flags = L4_KWQ_CONNECT_REQ1_IP_V6;
+               l4kwqe2->op_code = L4_KWQE_OPCODE_VALUE_CONNECT2;
+               l4kwqe2->flags =
+                       L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT |
+                       L4_LAYER_CODE << L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT;
+               l4kwqe2->src_ip_v6_2 = be32_to_cpu(csk->src_ip[1]);
+               l4kwqe2->src_ip_v6_3 = be32_to_cpu(csk->src_ip[2]);
+               l4kwqe2->src_ip_v6_4 = be32_to_cpu(csk->src_ip[3]);
+               l4kwqe2->dst_ip_v6_2 = be32_to_cpu(csk->dst_ip[1]);
+               l4kwqe2->dst_ip_v6_3 = be32_to_cpu(csk->dst_ip[2]);
+               l4kwqe2->dst_ip_v6_4 = be32_to_cpu(csk->dst_ip[3]);
+               l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct ipv6hdr) -
+                              sizeof(struct tcphdr);
+       } else {
+               wqes[1] = (struct kwqe *) l4kwqe3;
+               l4kwqe3->mss = l4kwqe3->pmtu - sizeof(struct iphdr) -
+                              sizeof(struct tcphdr);
+       }
+
+       l4kwqe1->op_code = L4_KWQE_OPCODE_VALUE_CONNECT1;
+       l4kwqe1->flags =
+               (L4_LAYER_CODE << L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT) |
+                L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT;
+       l4kwqe1->cid = csk->cid;
+       l4kwqe1->pg_cid = csk->pg_cid;
+       l4kwqe1->src_ip = be32_to_cpu(csk->src_ip[0]);
+       l4kwqe1->dst_ip = be32_to_cpu(csk->dst_ip[0]);
+       l4kwqe1->src_port = be16_to_cpu(csk->src_port);
+       l4kwqe1->dst_port = be16_to_cpu(csk->dst_port);
+       if (csk->tcp_flags & SK_TCP_NO_DELAY_ACK)
+               tcp_flags |= L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK;
+       if (csk->tcp_flags & SK_TCP_KEEP_ALIVE)
+               tcp_flags |= L4_KWQ_CONNECT_REQ1_KEEP_ALIVE;
+       if (csk->tcp_flags & SK_TCP_NAGLE)
+               tcp_flags |= L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE;
+       if (csk->tcp_flags & SK_TCP_TIMESTAMP)
+               tcp_flags |= L4_KWQ_CONNECT_REQ1_TIME_STAMP;
+       if (csk->tcp_flags & SK_TCP_SACK)
+               tcp_flags |= L4_KWQ_CONNECT_REQ1_SACK;
+       if (csk->tcp_flags & SK_TCP_SEG_SCALING)
+               tcp_flags |= L4_KWQ_CONNECT_REQ1_SEG_SCALING;
+
+       l4kwqe1->tcp_flags = tcp_flags;
+
+       return dev->submit_kwqes(dev, wqes, num_wqes);
+}
+
+static int cnic_cm_close_req(struct cnic_sock *csk)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct l4_kwq_close_req *l4kwqe;
+       struct kwqe *wqes[1];
+
+       l4kwqe = (struct l4_kwq_close_req *) &csk->kwqe2;
+       memset(l4kwqe, 0, sizeof(*l4kwqe));
+       wqes[0] = (struct kwqe *) l4kwqe;
+
+       l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_CLOSE;
+       l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT;
+       l4kwqe->cid = csk->cid;
+
+       return dev->submit_kwqes(dev, wqes, 1);
+}
+
+static int cnic_cm_abort_req(struct cnic_sock *csk)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct l4_kwq_reset_req *l4kwqe;
+       struct kwqe *wqes[1];
+
+       l4kwqe = (struct l4_kwq_reset_req *) &csk->kwqe2;
+       memset(l4kwqe, 0, sizeof(*l4kwqe));
+       wqes[0] = (struct kwqe *) l4kwqe;
+
+       l4kwqe->op_code = L4_KWQE_OPCODE_VALUE_RESET;
+       l4kwqe->flags = L4_LAYER_CODE << L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT;
+       l4kwqe->cid = csk->cid;
+
+       return dev->submit_kwqes(dev, wqes, 1);
+}
+
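+/* Claim the cnic_sock slot for l5_cid in the socket table and fill in
+ * default TCP parameters.  Fails with -EAGAIN if the slot is still
+ * referenced and -EBUSY if it is already marked in use.
+ */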
+static int cnic_cm_create(struct cnic_dev *dev, int ulp_type, u32 cid,
+                         u32 l5_cid, struct cnic_sock **csk, void *context)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_sock *csk1;
+
+       if (l5_cid >= MAX_CM_SK_TBL_SZ)
+               return -EINVAL;
+
+       csk1 = &cp->csk_tbl[l5_cid];
+       if (atomic_read(&csk1->ref_count))
+               return -EAGAIN;
+
+       if (test_and_set_bit(SK_F_INUSE, &csk1->flags))
+               return -EBUSY;
+
+       csk1->dev = dev;
+       csk1->cid = cid;
+       csk1->l5_cid = l5_cid;
+       csk1->ulp_type = ulp_type;
+       csk1->context = context;
+
+       csk1->ka_timeout = DEF_KA_TIMEOUT;
+       csk1->ka_interval = DEF_KA_INTERVAL;
+       csk1->ka_max_probe_count = DEF_KA_MAX_PROBE_COUNT;
+       csk1->tos = DEF_TOS;
+       csk1->ttl = DEF_TTL;
+       csk1->snd_seq_scale = DEF_SND_SEQ_SCALE;
+       csk1->rcv_buf = DEF_RCV_BUF;
+       csk1->snd_buf = DEF_SND_BUF;
+       csk1->seed = DEF_SEED;
+
+       *csk = csk1;
+       return 0;
+}
+
+static void cnic_cm_cleanup(struct cnic_sock *csk)
+{
+       if (csk->src_port) {
+               struct cnic_dev *dev = csk->dev;
+               struct cnic_local *cp = dev->cnic_priv;
+
+               cnic_free_id(&cp->csk_port_tbl, csk->src_port);
+               csk->src_port = 0;
+       }
+}
+
+static void cnic_close_conn(struct cnic_sock *csk)
+{
+       if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags)) {
+               cnic_cm_upload_pg(csk);
+               clear_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+       }
+       cnic_cm_cleanup(csk);
+}
+
+static int cnic_cm_destroy(struct cnic_sock *csk)
+{
+       if (!cnic_in_use(csk))
+               return -EINVAL;
+
+       csk_hold(csk);
+       clear_bit(SK_F_INUSE, &csk->flags);
+       smp_mb__after_clear_bit();
+       while (atomic_read(&csk->ref_count) != 1)
+               msleep(1);
+       cnic_cm_cleanup(csk);
+
+       csk->flags = 0;
+       csk_put(csk);
+       return 0;
+}
+
+static inline u16 cnic_get_vlan(struct net_device *dev,
+                               struct net_device **vlan_dev)
+{
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               *vlan_dev = vlan_dev_real_dev(dev);
+               return vlan_dev_vlan_id(dev);
+       }
+       *vlan_dev = dev;
+       return 0;
+}
+
+static int cnic_get_v4_route(struct sockaddr_in *dst_addr,
+                            struct dst_entry **dst)
+{
+       struct flowi fl;
+       int err;
+       struct rtable *rt;
+
+       memset(&fl, 0, sizeof(fl));
+       fl.nl_u.ip4_u.daddr = dst_addr->sin_addr.s_addr;
+
+       err = ip_route_output_key(&init_net, &rt, &fl);
+       if (!err)
+               *dst = &rt->u.dst;
+       return err;
+}
+
+static int cnic_get_v6_route(struct sockaddr_in6 *dst_addr,
+                            struct dst_entry **dst)
+{
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+       struct flowi fl;
+
+       memset(&fl, 0, sizeof(fl));
+       ipv6_addr_copy(&fl.fl6_dst, &dst_addr->sin6_addr);
+       if (ipv6_addr_type(&fl.fl6_dst) & IPV6_ADDR_LINKLOCAL)
+               fl.oif = dst_addr->sin6_scope_id;
+
+       *dst = ip6_route_output(&init_net, NULL, &fl);
+       if (*dst)
+               return 0;
+#endif
+
+       return -ENETUNREACH;
+}
+
+static struct cnic_dev *cnic_cm_select_dev(struct sockaddr_in *dst_addr,
+                                          int ulp_type)
+{
+       struct cnic_dev *dev = NULL;
+       struct dst_entry *dst;
+       struct net_device *netdev = NULL;
+       int err = -ENETUNREACH;
+
+       if (dst_addr->sin_family == AF_INET)
+               err = cnic_get_v4_route(dst_addr, &dst);
+       else if (dst_addr->sin_family == AF_INET6) {
+               struct sockaddr_in6 *dst_addr6 =
+                       (struct sockaddr_in6 *) dst_addr;
+
+               err = cnic_get_v6_route(dst_addr6, &dst);
+       } else
+               return NULL;
+
+       if (err)
+               return NULL;
+
+       if (!dst->dev)
+               goto done;
+
+       cnic_get_vlan(dst->dev, &netdev);
+
+       dev = cnic_from_netdev(netdev);
+
+done:
+       dst_release(dst);
+       if (dev)
+               cnic_put(dev);
+       return dev;
+}
+
+static int cnic_resolve_addr(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       return cnic_send_nlmsg(cp, ISCSI_KEVENT_PATH_REQ, csk);
+}
+
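+/* Route the destination address, record the remote IP and port on the
+ * socket, check that the route egresses through this device's netdev,
+ * and reserve a local source port from the csk_port_tbl ID table.
+ */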
+static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct cnic_local *cp = dev->cnic_priv;
+       int is_v6, err, rc = -ENETUNREACH;
+       struct dst_entry *dst;
+       struct net_device *realdev;
+       u32 local_port;
+
+       if (saddr->local.v6.sin6_family == AF_INET6 &&
+           saddr->remote.v6.sin6_family == AF_INET6)
+               is_v6 = 1;
+       else if (saddr->local.v4.sin_family == AF_INET &&
+                saddr->remote.v4.sin_family == AF_INET)
+               is_v6 = 0;
+       else
+               return -EINVAL;
+
+       clear_bit(SK_F_IPV6, &csk->flags);
+
+       if (is_v6) {
+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
+               set_bit(SK_F_IPV6, &csk->flags);
+               err = cnic_get_v6_route(&saddr->remote.v6, &dst);
+               if (err)
+                       return err;
+
+               if (!dst || dst->error || !dst->dev)
+                       goto err_out;
+
+               memcpy(&csk->dst_ip[0], &saddr->remote.v6.sin6_addr,
+                      sizeof(struct in6_addr));
+               csk->dst_port = saddr->remote.v6.sin6_port;
+               local_port = saddr->local.v6.sin6_port;
+#else
+               return rc;
+#endif
+
+       } else {
+               err = cnic_get_v4_route(&saddr->remote.v4, &dst);
+               if (err)
+                       return err;
+
+               if (!dst || dst->error || !dst->dev)
+                       goto err_out;
+
+               csk->dst_ip[0] = saddr->remote.v4.sin_addr.s_addr;
+               csk->dst_port = saddr->remote.v4.sin_port;
+               local_port = saddr->local.v4.sin_port;
+       }
+
+       csk->vlan_id = cnic_get_vlan(dst->dev, &realdev);
+       if (realdev != dev->netdev)
+               goto err_out;
+
+       if (local_port >= CNIC_LOCAL_PORT_MIN &&
+           local_port < CNIC_LOCAL_PORT_MAX) {
+               if (cnic_alloc_id(&cp->csk_port_tbl, local_port))
+                       local_port = 0;
+       } else
+               local_port = 0;
+
+       if (!local_port) {
+               local_port = cnic_alloc_new_id(&cp->csk_port_tbl);
+               if (local_port == -1) {
+                       rc = -ENOMEM;
+                       goto err_out;
+               }
+       }
+       csk->src_port = local_port;
+
+       csk->mtu = dst_mtu(dst);
+       rc = 0;
+
+err_out:
+       dst_release(dst);
+       return rc;
+}
+
+static void cnic_init_csk_state(struct cnic_sock *csk)
+{
+       csk->state = 0;
+       clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+       clear_bit(SK_F_CLOSING, &csk->flags);
+}
+
+static int cnic_cm_connect(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
+{
+       int err = 0;
+
+       if (!cnic_in_use(csk))
+               return -EINVAL;
+
+       if (test_and_set_bit(SK_F_CONNECT_START, &csk->flags))
+               return -EINVAL;
+
+       cnic_init_csk_state(csk);
+
+       err = cnic_get_route(csk, saddr);
+       if (err)
+               goto err_out;
+
+       err = cnic_resolve_addr(csk, saddr);
+       if (!err)
+               return 0;
+
+err_out:
+       clear_bit(SK_F_CONNECT_START, &csk->flags);
+       return err;
+}
+
+static int cnic_cm_abort(struct cnic_sock *csk)
+{
+       struct cnic_local *cp = csk->dev->cnic_priv;
+       u32 opcode;
+
+       if (!cnic_in_use(csk))
+               return -EINVAL;
+
+       if (cnic_abort_prep(csk))
+               return cnic_cm_abort_req(csk);
+
+       /* Getting here means that we haven't started the connect, or the
+        * connect was not successful.
+        */
+
+       csk->state = L4_KCQE_OPCODE_VALUE_RESET_COMP;
+       if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+               opcode = csk->state;
+       else
+               opcode = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
+       cp->close_conn(csk, opcode);
+
+       return 0;
+}
+
+static int cnic_cm_close(struct cnic_sock *csk)
+{
+       if (!cnic_in_use(csk))
+               return -EINVAL;
+
+       if (cnic_close_prep(csk)) {
+               csk->state = L4_KCQE_OPCODE_VALUE_CLOSE_COMP;
+               return cnic_cm_close_req(csk);
+       }
+       return 0;
+}
+
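+/* Map an L4 KCQE opcode to the corresponding ULP connection callback
+ * (connect/close/abort complete, remote close/abort).
+ */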
+static void cnic_cm_upcall(struct cnic_local *cp, struct cnic_sock *csk,
+                          u8 opcode)
+{
+       struct cnic_ulp_ops *ulp_ops;
+       int ulp_type = csk->ulp_type;
+
+       rcu_read_lock();
+       ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
+       if (ulp_ops) {
+               if (opcode == L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE)
+                       ulp_ops->cm_connect_complete(csk);
+               else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)
+                       ulp_ops->cm_close_complete(csk);
+               else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED)
+                       ulp_ops->cm_remote_abort(csk);
+               else if (opcode == L4_KCQE_OPCODE_VALUE_RESET_COMP)
+                       ulp_ops->cm_abort_complete(csk);
+               else if (opcode == L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED)
+                       ulp_ops->cm_remote_close(csk);
+       }
+       rcu_read_unlock();
+}
+
+static int cnic_cm_set_pg(struct cnic_sock *csk)
+{
+       if (cnic_offld_prep(csk)) {
+               if (test_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags))
+                       cnic_cm_update_pg(csk);
+               else
+                       cnic_cm_offload_pg(csk);
+       }
+       return 0;
+}
+
+static void cnic_cm_process_offld_pg(struct cnic_dev *dev, struct l4_kcq *kcqe)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       u32 l5_cid = kcqe->pg_host_opaque;
+       u8 opcode = kcqe->op_code;
+       struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
+
+       csk_hold(csk);
+       if (!cnic_in_use(csk))
+               goto done;
+
+       if (opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+               clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+               goto done;
+       }
+       csk->pg_cid = kcqe->pg_cid;
+       set_bit(SK_F_PG_OFFLD_COMPLETE, &csk->flags);
+       cnic_cm_conn_req(csk);
+
+done:
+       csk_put(csk);
+}
+
+static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct l4_kcq *l4kcqe = (struct l4_kcq *) kcqe;
+       u8 opcode = l4kcqe->op_code;
+       u32 l5_cid;
+       struct cnic_sock *csk;
+
+       if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
+           opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
+               cnic_cm_process_offld_pg(dev, l4kcqe);
+               return;
+       }
+
+       l5_cid = l4kcqe->conn_id;
+       if (opcode & 0x80)
+               l5_cid = l4kcqe->cid;
+       if (l5_cid >= MAX_CM_SK_TBL_SZ)
+               return;
+
+       csk = &cp->csk_tbl[l5_cid];
+       csk_hold(csk);
+
+       if (!cnic_in_use(csk)) {
+               csk_put(csk);
+               return;
+       }
+
+       switch (opcode) {
+       case L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE:
+               if (l4kcqe->status == 0)
+                       set_bit(SK_F_OFFLD_COMPLETE, &csk->flags);
+
+               smp_mb__before_clear_bit();
+               clear_bit(SK_F_OFFLD_SCHED, &csk->flags);
+               cnic_cm_upcall(cp, csk, opcode);
+               break;
+
+       case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
+               if (test_and_clear_bit(SK_F_OFFLD_COMPLETE, &csk->flags))
+                       csk->state = opcode;
+               /* fall through */
+       case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
+       case L4_KCQE_OPCODE_VALUE_RESET_COMP:
+               cp->close_conn(csk, opcode);
+               break;
+
+       case L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED:
+               cnic_cm_upcall(cp, csk, opcode);
+               break;
+       }
+       csk_put(csk);
+}
+
+static void cnic_cm_indicate_kcqe(void *data, struct kcqe *kcqe[], u32 num)
+{
+       struct cnic_dev *dev = data;
+       int i;
+
+       for (i = 0; i < num; i++)
+               cnic_cm_process_kcqe(dev, kcqe[i]);
+}
+
+static struct cnic_ulp_ops cm_ulp_ops = {
+       .indicate_kcqes         = cnic_cm_indicate_kcqe,
+};
+
+static void cnic_cm_free_mem(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       kfree(cp->csk_tbl);
+       cp->csk_tbl = NULL;
+       cnic_free_id_tbl(&cp->csk_port_tbl);
+}
+
+static int cnic_cm_alloc_mem(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+
+       cp->csk_tbl = kzalloc(sizeof(struct cnic_sock) * MAX_CM_SK_TBL_SZ,
+                             GFP_KERNEL);
+       if (!cp->csk_tbl)
+               return -ENOMEM;
+
+       if (cnic_init_id_tbl(&cp->csk_port_tbl, CNIC_LOCAL_PORT_RANGE,
+                            CNIC_LOCAL_PORT_MIN)) {
+               cnic_cm_free_mem(dev);
+               return -ENOMEM;
+       }
+       return 0;
+}
+
+static int cnic_ready_to_close(struct cnic_sock *csk, u32 opcode)
+{
+       if ((opcode == csk->state) ||
+           (opcode == L4_KCQE_OPCODE_VALUE_RESET_RECEIVED &&
+            csk->state == L4_KCQE_OPCODE_VALUE_CLOSE_COMP)) {
+               if (!test_and_set_bit(SK_F_CLOSING, &csk->flags))
+                       return 1;
+       }
+       return 0;
+}
+
+static void cnic_close_bnx2_conn(struct cnic_sock *csk, u32 opcode)
+{
+       struct cnic_dev *dev = csk->dev;
+       struct cnic_local *cp = dev->cnic_priv;
+
+       clear_bit(SK_F_CONNECT_START, &csk->flags);
+       if (cnic_ready_to_close(csk, opcode)) {
+               cnic_close_conn(csk);
+               cnic_cm_upcall(cp, csk, opcode);
+       }
+}
+
+static void cnic_cm_stop_bnx2_hw(struct cnic_dev *dev)
+{
+}
+
+static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
+{
+       u32 seed;
+
+       get_random_bytes(&seed, 4);
+       cnic_ctx_wr(dev, 45, 0, seed);
+       return 0;
+}
+
+static int cnic_cm_open(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int err;
+
+       err = cnic_cm_alloc_mem(dev);
+       if (err)
+               return err;
+
+       err = cp->start_cm(dev);
+
+       if (err)
+               goto err_out;
+
+       dev->cm_create = cnic_cm_create;
+       dev->cm_destroy = cnic_cm_destroy;
+       dev->cm_connect = cnic_cm_connect;
+       dev->cm_abort = cnic_cm_abort;
+       dev->cm_close = cnic_cm_close;
+       dev->cm_select_dev = cnic_cm_select_dev;
+
+       cp->ulp_handle[CNIC_ULP_L4] = dev;
+       rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], &cm_ulp_ops);
+       return 0;
+
+err_out:
+       cnic_cm_free_mem(dev);
+       return err;
+}
+
+static int cnic_cm_shutdown(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int i;
+
+       cp->stop_cm(dev);
+
+       if (!cp->csk_tbl)
+               return 0;
+
+       for (i = 0; i < MAX_CM_SK_TBL_SZ; i++) {
+               struct cnic_sock *csk = &cp->csk_tbl[i];
+
+               clear_bit(SK_F_INUSE, &csk->flags);
+               cnic_cm_cleanup(csk);
+       }
+       cnic_cm_free_mem(dev);
+
+       return 0;
+}
+
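+/* Zero the on-chip context memory for a CID.  Skipped on the 5709,
+ * which keeps its context in host memory instead.
+ */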
+static void cnic_init_context(struct cnic_dev *dev, u32 cid)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       u32 cid_addr;
+       int i;
+
+       if (CHIP_NUM(cp) == CHIP_NUM_5709)
+               return;
+
+       cid_addr = GET_CID_ADDR(cid);
+
+       for (i = 0; i < CTX_SIZE; i += 4)
+               cnic_ctx_wr(dev, cid_addr, i, 0);
+}
+
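+/* Program the 5709's host page table with the DMA addresses of the
+ * context blocks, polling each write for up to 50 usec until the
+ * WRITE_REQ bit clears.
+ */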
+static int cnic_setup_5709_context(struct cnic_dev *dev, int valid)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       int ret = 0, i;
+       u32 valid_bit = valid ? BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID : 0;
+
+       if (CHIP_NUM(cp) != CHIP_NUM_5709)
+               return 0;
+
+       for (i = 0; i < cp->ctx_blks; i++) {
+               int j;
+               u32 idx = cp->ctx_arr[i].cid / cp->cids_per_blk;
+               u32 val;
+
+               memset(cp->ctx_arr[i].ctx, 0, BCM_PAGE_SIZE);
+
+               CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA0,
+                       (cp->ctx_arr[i].mapping & 0xffffffff) | valid_bit);
+               CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_DATA1,
+                       (u64) cp->ctx_arr[i].mapping >> 32);
+               CNIC_WR(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL, idx |
+                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
+               for (j = 0; j < 10; j++) {
+                       val = CNIC_RD(dev, BNX2_CTX_HOST_PAGE_TBL_CTRL);
+                       if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
+                               break;
+                       udelay(5);
+               }
+               if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
+                       ret = -EBUSY;
+                       break;
+               }
+       }
+       return ret;
+}
+
+static void cnic_free_irq(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+
+       if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+               cp->disable_int_sync(dev);
+               tasklet_disable(&cp->cnic_irq_task);
+               free_irq(ethdev->irq_arr[0].vector, dev);
+       }
+}
+
+static int cnic_init_bnx2_irq(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+
+       if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+               int err, i = 0;
+               int sblk_num = cp->status_blk_num;
+               u32 base = ((sblk_num - 1) * BNX2_HC_SB_CONFIG_SIZE) +
+                          BNX2_HC_SB_CONFIG_1;
+
+               CNIC_WR(dev, base, BNX2_HC_SB_CONFIG_1_ONE_SHOT);
+
+               CNIC_WR(dev, base + BNX2_HC_COMP_PROD_TRIP_OFF, (2 << 16) | 8);
+               CNIC_WR(dev, base + BNX2_HC_COM_TICKS_OFF, (64 << 16) | 220);
+               CNIC_WR(dev, base + BNX2_HC_CMD_TICKS_OFF, (64 << 16) | 220);
+
+               cp->bnx2_status_blk = cp->status_blk;
+               cp->last_status_idx = cp->bnx2_status_blk->status_idx;
+               tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2_msix,
+                            (unsigned long) dev);
+               err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
+                                 "cnic", dev);
+               if (err) {
+                       tasklet_disable(&cp->cnic_irq_task);
+                       return err;
+               }
+               while (cp->bnx2_status_blk->status_completion_producer_index &&
+                      i < 10) {
+                       CNIC_WR(dev, BNX2_HC_COALESCE_NOW,
+                               1 << (11 + sblk_num));
+                       udelay(10);
+                       i++;
+                       barrier();
+               }
+               if (cp->bnx2_status_blk->status_completion_producer_index) {
+                       cnic_free_irq(dev);
+                       goto failed;
+               }
+
+       } else {
+               struct status_block *sblk = cp->status_blk;
+               u32 hc_cmd = CNIC_RD(dev, BNX2_HC_COMMAND);
+               int i = 0;
+
+               while (sblk->status_completion_producer_index && i < 10) {
+                       CNIC_WR(dev, BNX2_HC_COMMAND,
+                               hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
+                       udelay(10);
+                       i++;
+                       barrier();
+               }
+               if (sblk->status_completion_producer_index)
+                       goto failed;
+
+       }
+       return 0;
+
+failed:
+       printk(KERN_ERR PFX "%s: KCQ index not resetting to 0.\n",
+              dev->netdev->name);
+       return -EBUSY;
+}
+
+static void cnic_enable_bnx2_int(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+
+       if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+               return;
+
+       CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | cp->last_status_idx);
+}
+
+static void cnic_disable_bnx2_int_sync(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+
+       if (!(ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX))
+               return;
+
+       CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
+               BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
+       CNIC_RD(dev, BNX2_PCICFG_INT_ACK_CMD);
+       synchronize_irq(ethdev->irq_arr[0].vector);
+}
+
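+/* Set up the L2 TX ring that is exported to userspace through UIO:
+ * initialize the context, point the TX BDs at the shared DMA buffer,
+ * and write the ring's base address into the chip context.
+ */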
+static void cnic_init_bnx2_tx_ring(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       u32 cid_addr, tx_cid, sb_id;
+       u32 val, offset0, offset1, offset2, offset3;
+       int i;
+       struct tx_bd *txbd;
+       dma_addr_t buf_map;
+       struct status_block *s_blk = cp->status_blk;
+
+       sb_id = cp->status_blk_num;
+       tx_cid = 20;
+       cnic_init_context(dev, tx_cid);
+       cnic_init_context(dev, tx_cid + 1);
+       cp->tx_cons_ptr = &s_blk->status_tx_quick_consumer_index2;
+       if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+               struct status_block_msix *sblk = cp->status_blk;
+
+               tx_cid = TX_TSS_CID + sb_id - 1;
+               cnic_init_context(dev, tx_cid);
+               CNIC_WR(dev, BNX2_TSCH_TSS_CFG, (sb_id << 24) |
+                       (TX_TSS_CID << 7));
+               cp->tx_cons_ptr = &sblk->status_tx_quick_consumer_index;
+       }
+       cp->tx_cons = *cp->tx_cons_ptr;
+
+       cid_addr = GET_CID_ADDR(tx_cid);
+       if (CHIP_NUM(cp) == CHIP_NUM_5709) {
+               u32 cid_addr2 = GET_CID_ADDR(tx_cid + 4) + 0x40;
+
+               for (i = 0; i < PHY_CTX_SIZE; i += 4)
+                       cnic_ctx_wr(dev, cid_addr2, i, 0);
+
+               offset0 = BNX2_L2CTX_TYPE_XI;
+               offset1 = BNX2_L2CTX_CMD_TYPE_XI;
+               offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
+               offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
+       } else {
+               offset0 = BNX2_L2CTX_TYPE;
+               offset1 = BNX2_L2CTX_CMD_TYPE;
+               offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
+               offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
+       }
+       val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
+       cnic_ctx_wr(dev, cid_addr, offset0, val);
+
+       val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
+       cnic_ctx_wr(dev, cid_addr, offset1, val);
+
+       txbd = (struct tx_bd *) cp->l2_ring;
+
+       buf_map = cp->l2_buf_map;
+       for (i = 0; i < MAX_TX_DESC_CNT; i++, txbd++) {
+               txbd->tx_bd_haddr_hi = (u64) buf_map >> 32;
+               txbd->tx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+       }
+       val = (u64) cp->l2_ring_map >> 32;
+       cnic_ctx_wr(dev, cid_addr, offset2, val);
+       txbd->tx_bd_haddr_hi = val;
+
+       val = (u64) cp->l2_ring_map & 0xffffffff;
+       cnic_ctx_wr(dev, cid_addr, offset3, val);
+       txbd->tx_bd_haddr_lo = val;
+}
+
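+/* Set up the L2 RX ring exported through UIO: flush the consumer index,
+ * carve per-descriptor receive buffers out of the shared DMA area, and
+ * set the RXP scratch flood bit for this ring.
+ */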
+static void cnic_init_bnx2_rx_ring(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       u32 cid_addr, sb_id, val, coal_reg, coal_val;
+       int i;
+       struct rx_bd *rxbd;
+       struct status_block *s_blk = cp->status_blk;
+
+       sb_id = cp->status_blk_num;
+       cnic_init_context(dev, 2);
+       cp->rx_cons_ptr = &s_blk->status_rx_quick_consumer_index2;
+       coal_reg = BNX2_HC_COMMAND;
+       coal_val = CNIC_RD(dev, coal_reg);
+       if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+               struct status_block_msix *sblk = cp->status_blk;
+
+               cp->rx_cons_ptr = &sblk->status_rx_quick_consumer_index;
+               coal_reg = BNX2_HC_COALESCE_NOW;
+               coal_val = 1 << (11 + sb_id);
+       }
+       i = 0;
+       while (*cp->rx_cons_ptr == 0 && i < 10) {
+               CNIC_WR(dev, coal_reg, coal_val);
+               udelay(10);
+               i++;
+               barrier();
+       }
+       cp->rx_cons = *cp->rx_cons_ptr;
+
+       cid_addr = GET_CID_ADDR(2);
+       val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE |
+             BNX2_L2CTX_CTX_TYPE_SIZE_L2 | (0x02 << 8);
+       cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_CTX_TYPE, val);
+
+       if (sb_id == 0)
+               val = 2 << BNX2_L2CTX_STATUSB_NUM_SHIFT;
+       else
+               val = BNX2_L2CTX_STATUSB_NUM(sb_id);
+       cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_HOST_BDIDX, val);
+
+       rxbd = (struct rx_bd *) (cp->l2_ring + BCM_PAGE_SIZE);
+       for (i = 0; i < MAX_RX_DESC_CNT; i++, rxbd++) {
+               dma_addr_t buf_map;
+               int n = (i % cp->l2_rx_ring_size) + 1;
+
+               buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
+               rxbd->rx_bd_len = cp->l2_single_buf_size;
+               rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
+               rxbd->rx_bd_haddr_hi = (u64) buf_map >> 32;
+               rxbd->rx_bd_haddr_lo = (u64) buf_map & 0xffffffff;
+       }
+       val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
+       cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
+       rxbd->rx_bd_haddr_hi = val;
+
+       val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
+       cnic_ctx_wr(dev, cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
+       rxbd->rx_bd_haddr_lo = val;
+
+       val = cnic_reg_rd_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD);
+       cnic_reg_wr_ind(dev, BNX2_RXP_SCRATCH_RXP_FLOOD, val | (1 << 2));
+}
+
+static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
+{
+       struct kwqe *wqes[1], l2kwqe;
+
+       memset(&l2kwqe, 0, sizeof(l2kwqe));
+       wqes[0] = &l2kwqe;
+       l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) |
+                             (L2_KWQE_OPCODE_VALUE_FLUSH <<
+                              KWQE_OPCODE_SHIFT) | 2;
+       dev->submit_kwqes(dev, wqes, 1);
+}
+
+static void cnic_set_bnx2_mac(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       u32 val;
+
+       val = cp->func << 2;
+
+       cp->shmem_base = cnic_reg_rd_ind(dev, BNX2_SHM_HDR_ADDR_0 + val);
+
+       val = cnic_reg_rd_ind(dev, cp->shmem_base +
+                             BNX2_PORT_HW_CFG_ISCSI_MAC_UPPER);
+       dev->mac_addr[0] = (u8) (val >> 8);
+       dev->mac_addr[1] = (u8) val;
+
+       CNIC_WR(dev, BNX2_EMAC_MAC_MATCH4, val);
+
+       val = cnic_reg_rd_ind(dev, cp->shmem_base +
+                             BNX2_PORT_HW_CFG_ISCSI_MAC_LOWER);
+       dev->mac_addr[2] = (u8) (val >> 24);
+       dev->mac_addr[3] = (u8) (val >> 16);
+       dev->mac_addr[4] = (u8) (val >> 8);
+       dev->mac_addr[5] = (u8) val;
+
+       CNIC_WR(dev, BNX2_EMAC_MAC_MATCH5, val);
+
+       val = 4 | BNX2_RPM_SORT_USER2_BC_EN;
+       if (CHIP_NUM(cp) != CHIP_NUM_5709)
+               val |= BNX2_RPM_SORT_USER2_PROM_VLAN;
+
+       CNIC_WR(dev, BNX2_RPM_SORT_USER2, 0x0);
+       CNIC_WR(dev, BNX2_RPM_SORT_USER2, val);
+       CNIC_WR(dev, BNX2_RPM_SORT_USER2, val | BNX2_RPM_SORT_USER2_ENA);
+}
+
+static int cnic_start_bnx2_hw(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       struct status_block *sblk = cp->status_blk;
+       u32 val;
+       int err;
+
+       cnic_set_bnx2_mac(dev);
+
+       val = CNIC_RD(dev, BNX2_MQ_CONFIG);
+       val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
+       if (BCM_PAGE_BITS > 12)
+               val |= (12 - 8)  << 4;
+       else
+               val |= (BCM_PAGE_BITS - 8)  << 4;
+
+       CNIC_WR(dev, BNX2_MQ_CONFIG, val);
+
+       CNIC_WR(dev, BNX2_HC_COMP_PROD_TRIP, (2 << 16) | 8);
+       CNIC_WR(dev, BNX2_HC_COM_TICKS, (64 << 16) | 220);
+       CNIC_WR(dev, BNX2_HC_CMD_TICKS, (64 << 16) | 220);
+
+       err = cnic_setup_5709_context(dev, 1);
+       if (err)
+               return err;
+
+       cnic_init_context(dev, KWQ_CID);
+       cnic_init_context(dev, KCQ_CID);
+
+       cp->kwq_cid_addr = GET_CID_ADDR(KWQ_CID);
+       cp->kwq_io_addr = MB_GET_CID_ADDR(KWQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+       cp->max_kwq_idx = MAX_KWQ_IDX;
+       cp->kwq_prod_idx = 0;
+       cp->kwq_con_idx = 0;
+       cp->cnic_local_flags |= CNIC_LCL_FL_KWQ_INIT;
+
+       if (CHIP_NUM(cp) == CHIP_NUM_5706 || CHIP_NUM(cp) == CHIP_NUM_5708)
+               cp->kwq_con_idx_ptr = &sblk->status_rx_quick_consumer_index15;
+       else
+               cp->kwq_con_idx_ptr = &sblk->status_cmd_consumer_index;
+
+       /* Initialize the kernel work queue context. */
+       val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+             (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+       cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_TYPE, val);
+
+       val = (BCM_PAGE_SIZE / sizeof(struct kwqe) - 1) << 16;
+       cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+       val = ((BCM_PAGE_SIZE / sizeof(struct kwqe)) << 16) | KWQ_PAGE_CNT;
+       cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+       val = (u32) ((u64) cp->kwq_info.pgtbl_map >> 32);
+       cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+       val = (u32) cp->kwq_info.pgtbl_map;
+       cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+       cp->kcq_cid_addr = GET_CID_ADDR(KCQ_CID);
+       cp->kcq_io_addr = MB_GET_CID_ADDR(KCQ_CID) + L5_KRNLQ_HOST_QIDX;
+
+       cp->kcq_prod_idx = 0;
+
+       /* Initialize the kernel complete queue context. */
+       val = KRNLQ_TYPE_TYPE_KRNLQ | KRNLQ_SIZE_TYPE_SIZE |
+             (BCM_PAGE_BITS - 8) | KRNLQ_FLAGS_QE_SELF_SEQ;
+       cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_TYPE, val);
+
+       val = (BCM_PAGE_SIZE / sizeof(struct kcqe) - 1) << 16;
+       cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_QE_SELF_SEQ_MAX, val);
+
+       val = ((BCM_PAGE_SIZE / sizeof(struct kcqe)) << 16) | KCQ_PAGE_CNT;
+       cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_NPAGES, val);
+
+       val = (u32) ((u64) cp->kcq_info.pgtbl_map >> 32);
+       cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_HI, val);
+
+       val = (u32) cp->kcq_info.pgtbl_map;
+       cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_PGTBL_HADDR_LO, val);
+
+       cp->int_num = 0;
+       if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
+               u32 sb_id = cp->status_blk_num;
+               u32 sb = BNX2_L2CTX_STATUSB_NUM(sb_id);
+
+               cp->int_num = sb_id << BNX2_PCICFG_INT_ACK_CMD_INT_NUM_SHIFT;
+               cnic_ctx_wr(dev, cp->kwq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+               cnic_ctx_wr(dev, cp->kcq_cid_addr, L5_KRNLQ_HOST_QIDX, sb);
+       }
+
+       /* Enable Command Scheduler notification when we write to the
+        * host producer index of the kernel contexts. */
+       CNIC_WR(dev, BNX2_MQ_KNL_CMD_MASK1, 2);
+
+       /* Enable Command Scheduler notification when we write to either
+        * the Send Queue or Receive Queue producer indexes of the kernel
+        * bypass contexts. */
+       CNIC_WR(dev, BNX2_MQ_KNL_BYP_CMD_MASK1, 7);
+       CNIC_WR(dev, BNX2_MQ_KNL_BYP_WRITE_MASK1, 7);
+
+       /* Notify COM when the driver posts an application buffer. */
+       CNIC_WR(dev, BNX2_MQ_KNL_RX_V2P_MASK2, 0x2000);
+
+       /* Set the CP and COM doorbells.  These two processors poll the
+        * doorbell for a non-zero value before running.  This must be done
+        * after setting up the kernel queue contexts. */
+       cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 1);
+       cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 1);
+
+       cnic_init_bnx2_tx_ring(dev);
+       cnic_init_bnx2_rx_ring(dev);
+
+       err = cnic_init_bnx2_irq(dev);
+       if (err) {
+               printk(KERN_ERR PFX "%s: cnic_init_irq failed\n",
+                      dev->netdev->name);
+               cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+               cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+               return err;
+       }
+
+       return 0;
+}
+
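+/* Common bring-up path: register with the ethernet driver, allocate
+ * resources, run the chip-specific hardware and connection-manager
+ * setup, then mark the device up and enable interrupts.  Failures
+ * unwind whatever was set up.
+ */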
+static int cnic_start_hw(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+       int err;
+
+       if (test_bit(CNIC_F_CNIC_UP, &dev->flags))
+               return -EALREADY;
+
+       err = ethdev->drv_register_cnic(dev->netdev, cp->cnic_ops, dev);
+       if (err) {
+               printk(KERN_ERR PFX "%s: register_cnic failed\n",
+                      dev->netdev->name);
+               goto err2;
+       }
+
+       dev->regview = ethdev->io_base;
+       cp->chip_id = ethdev->chip_id;
+       pci_dev_get(dev->pcidev);
+       cp->func = PCI_FUNC(dev->pcidev->devfn);
+       cp->status_blk = ethdev->irq_arr[0].status_blk;
+       cp->status_blk_num = ethdev->irq_arr[0].status_blk_num;
+
+       err = cp->alloc_resc(dev);
+       if (err) {
+               printk(KERN_ERR PFX "%s: allocate resource failure\n",
+                      dev->netdev->name);
+               goto err1;
+       }
+
+       err = cp->start_hw(dev);
+       if (err)
+               goto err1;
+
+       err = cnic_cm_open(dev);
+       if (err)
+               goto err1;
+
+       set_bit(CNIC_F_CNIC_UP, &dev->flags);
+
+       cp->enable_int(dev);
+
+       return 0;
+
+err1:
+       ethdev->drv_unregister_cnic(dev->netdev);
+       cp->free_resc(dev);
+       pci_dev_put(dev->pcidev);
+err2:
+       return err;
+}
+
+static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
+{
+       struct cnic_local *cp = dev->cnic_priv;
+       struct cnic_eth_dev *ethdev = cp->ethdev;
+
+       cnic_disable_bnx2_int_sync(dev);
+
+       cnic_reg_wr_ind(dev, BNX2_CP_SCRATCH + 0x20, 0);
+       cnic_reg_wr_ind(dev, BNX2_COM_SCRATCH + 0x20, 0);
+
+       cnic_init_context(dev, KWQ_CID);
+       cnic_init_context(dev, KCQ_CID);
+
+       cnic_setup_5709_context(dev, 0);
+       cnic_free_irq(dev);
+
+       ethdev->drv_unregister_cnic(dev->netdev);
+
+       cnic_free_resc(dev);
+}
+
+static void cnic_stop_hw(struct cnic_dev *dev)
+{
+       if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+               struct cnic_local *cp = dev->cnic_priv;
+
+               clear_bit(CNIC_F_CNIC_UP, &dev->flags);
+               rcu_assign_pointer(cp->ulp_ops[CNIC_ULP_L4], NULL);
+               synchronize_rcu();
+               cnic_cm_shutdown(dev);
+               cp->stop_hw(dev);
+               pci_dev_put(dev->pcidev);
+       }
+}
+
+static void cnic_free_dev(struct cnic_dev *dev)
+{
+       int i = 0;
+
+       while ((atomic_read(&dev->ref_count) != 0) && i < 10) {
+               msleep(100);
+               i++;
+       }
+       if (atomic_read(&dev->ref_count) != 0)
+               printk(KERN_ERR PFX "%s: Failed waiting for ref count to go"
+                                   " to zero.\n", dev->netdev->name);
+
+       printk(KERN_INFO PFX "Removed CNIC device: %s\n", dev->netdev->name);
+       dev_put(dev->netdev);
+       kfree(dev);
+}
+
+static struct cnic_dev *cnic_alloc_dev(struct net_device *dev,
+                                      struct pci_dev *pdev)
+{
+       struct cnic_dev *cdev;
+       struct cnic_local *cp;
+       int alloc_size;
+
+       alloc_size = sizeof(struct cnic_dev) + sizeof(struct cnic_local);
+
+       cdev = kzalloc(alloc_size, GFP_KERNEL);
+       if (cdev == NULL) {
+               printk(KERN_ERR PFX "%s: allocate dev struct failure\n",
+                      dev->name);
+               return NULL;
+       }
+
+       cdev->netdev = dev;
+       cdev->cnic_priv = (char *)cdev + sizeof(struct cnic_dev);
+       cdev->register_device = cnic_register_device;
+       cdev->unregister_device = cnic_unregister_device;
+       cdev->iscsi_nl_msg_recv = cnic_iscsi_nl_msg_recv;
+
+       cp = cdev->cnic_priv;
+       cp->dev = cdev;
+       cp->uio_dev = -1;
+       cp->l2_single_buf_size = 0x400;
+       cp->l2_rx_ring_size = 3;
+
+       spin_lock_init(&cp->cnic_ulp_lock);
+
+       printk(KERN_INFO PFX "Added CNIC device: %s\n", dev->name);
+
+       return cdev;
+}
+
+static struct cnic_dev *init_bnx2_cnic(struct net_device *dev)
+{
+       struct pci_dev *pdev;
+       struct cnic_dev *cdev;
+       struct cnic_local *cp;
+       struct cnic_eth_dev *ethdev = NULL;
+       struct cnic_eth_dev *(*probe)(void *) = NULL;
+
+       probe = __symbol_get("bnx2_cnic_probe");
+       if (probe) {
+               ethdev = (*probe)(dev);
+               symbol_put_addr(probe);
+       }
+       if (!ethdev)
+               return NULL;
+
+       pdev = ethdev->pdev;
+       if (!pdev)
+               return NULL;
+
+       dev_hold(dev);
+       pci_dev_get(pdev);
+       if (pdev->device == PCI_DEVICE_ID_NX2_5709 ||
+           pdev->device == PCI_DEVICE_ID_NX2_5709S) {
+               u8 rev;
+
+               pci_read_config_byte(pdev, PCI_REVISION_ID, &rev);
+               if (rev < 0x10) {
+                       pci_dev_put(pdev);
+                       goto cnic_err;
+               }
+       }
+       pci_dev_put(pdev);
+
+       cdev = cnic_alloc_dev(dev, pdev);
+       if (cdev == NULL)
+               goto cnic_err;
+
+       set_bit(CNIC_F_BNX2_CLASS, &cdev->flags);
+       cdev->submit_kwqes = cnic_submit_bnx2_kwqes;
+
+       cp = cdev->cnic_priv;
+       cp->ethdev = ethdev;
+       cdev->pcidev = pdev;
+
+       cp->cnic_ops = &cnic_bnx2_ops;
+       cp->start_hw = cnic_start_bnx2_hw;
+       cp->stop_hw = cnic_stop_bnx2_hw;
+       cp->setup_pgtbl = cnic_setup_page_tbl;
+       cp->alloc_resc = cnic_alloc_bnx2_resc;
+       cp->free_resc = cnic_free_resc;
+       cp->start_cm = cnic_cm_init_bnx2_hw;
+       cp->stop_cm = cnic_cm_stop_bnx2_hw;
+       cp->enable_int = cnic_enable_bnx2_int;
+       cp->disable_int_sync = cnic_disable_bnx2_int_sync;
+       cp->close_conn = cnic_close_bnx2_conn;
+       cp->next_idx = cnic_bnx2_next_idx;
+       cp->hw_idx = cnic_bnx2_hw_idx;
+       return cdev;
+
+cnic_err:
+       dev_put(dev);
+       return NULL;
+}
+
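+/* Use ethtool drvinfo to decide whether a net_device is driven by bnx2;
+ * if so, create a CNIC device for it and add it to the global list.
+ */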
+static struct cnic_dev *is_cnic_dev(struct net_device *dev)
+{
+       struct ethtool_drvinfo drvinfo;
+       struct cnic_dev *cdev = NULL;
+
+       if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
+               memset(&drvinfo, 0, sizeof(drvinfo));
+               dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
+
+               if (!strcmp(drvinfo.driver, "bnx2"))
+                       cdev = init_bnx2_cnic(dev);
+               if (cdev) {
+                       write_lock(&cnic_dev_lock);
+                       list_add(&cdev->list, &cnic_dev_list);
+                       write_unlock(&cnic_dev_lock);
+               }
+       }
+       return cdev;
+}
+
+/* netdev event handler: creates, starts, stops, and frees CNIC devices
+ * in response to NETDEV_* notifications on the underlying net_device.
+ */
+static int cnic_netdev_event(struct notifier_block *this, unsigned long event,
+                                                        void *ptr)
+{
+       struct net_device *netdev = ptr;
+       struct cnic_dev *dev;
+       int if_type;
+       int new_dev = 0;
+
+       dev = cnic_from_netdev(netdev);
+
+       if (!dev && (event == NETDEV_REGISTER || event == NETDEV_UP)) {
+               /* Check for the hot-plug device */
+               dev = is_cnic_dev(netdev);
+               if (dev) {
+                       new_dev = 1;
+                       cnic_hold(dev);
+               }
+       }
+       if (dev) {
+               struct cnic_local *cp = dev->cnic_priv;
+
+               if (new_dev)
+                       cnic_ulp_init(dev);
+               else if (event == NETDEV_UNREGISTER)
+                       cnic_ulp_exit(dev);
+               else if (event == NETDEV_UP) {
+                       mutex_lock(&cnic_lock);
+                       if (!cnic_start_hw(dev))
+                               cnic_ulp_start(dev);
+                       mutex_unlock(&cnic_lock);
+               }
+
+               rcu_read_lock();
+               for (if_type = 0; if_type < MAX_CNIC_ULP_TYPE; if_type++) {
+                       struct cnic_ulp_ops *ulp_ops;
+                       void *ctx;
+
+                       ulp_ops = rcu_dereference(cp->ulp_ops[if_type]);
+                       if (!ulp_ops || !ulp_ops->indicate_netevent)
+                               continue;
+
+                       ctx = cp->ulp_handle[if_type];
+
+                       ulp_ops->indicate_netevent(ctx, event);
+               }
+               rcu_read_unlock();
+
+               if (event == NETDEV_GOING_DOWN) {
+                       mutex_lock(&cnic_lock);
+                       cnic_ulp_stop(dev);
+                       cnic_stop_hw(dev);
+                       mutex_unlock(&cnic_lock);
+               } else if (event == NETDEV_UNREGISTER) {
+                       write_lock(&cnic_dev_lock);
+                       list_del_init(&dev->list);
+                       write_unlock(&cnic_dev_lock);
+
+                       cnic_put(dev);
+                       cnic_free_dev(dev);
+                       goto done;
+               }
+               cnic_put(dev);
+       }
+done:
+       return NOTIFY_DONE;
+}
+
+static struct notifier_block cnic_netdev_notifier = {
+       .notifier_call = cnic_netdev_event
+};
+
+static void cnic_release(void)
+{
+       struct cnic_dev *dev;
+
+       while (!list_empty(&cnic_dev_list)) {
+               dev = list_entry(cnic_dev_list.next, struct cnic_dev, list);
+               if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
+                       cnic_ulp_stop(dev);
+                       cnic_stop_hw(dev);
+               }
+
+               cnic_ulp_exit(dev);
+               list_del_init(&dev->list);
+               cnic_free_dev(dev);
+       }
+}
+
+static int __init cnic_init(void)
+{
+       int rc = 0;
+
+       printk(KERN_INFO "%s", version);
+
+       rc = register_netdevice_notifier(&cnic_netdev_notifier);
+       if (rc) {
+               cnic_release();
+               return rc;
+       }
+
+       return 0;
+}
+
+static void __exit cnic_exit(void)
+{
+       unregister_netdevice_notifier(&cnic_netdev_notifier);
+       cnic_release();
+       return;
+}
+
+module_init(cnic_init);
+module_exit(cnic_exit);
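
Editor's note: cnic discovers candidate NICs passively through the netdevice
notifier chain instead of claiming PCI IDs itself.  A stripped-down sketch of
the same pattern (hypothetical demo module, not part of this patch); on
kernels of this vintage the notifier payload is the struct net_device itself:

    #include <linux/module.h>
    #include <linux/netdevice.h>

    static int demo_netdev_event(struct notifier_block *nb,
    			     unsigned long event, void *ptr)
    {
    	struct net_device *netdev = ptr;  /* pre-netdev_notifier_info ABI */

    	if (event == NETDEV_UP)
    		printk(KERN_INFO "demo: %s is up\n", netdev->name);
    	return NOTIFY_DONE;
    }

    static struct notifier_block demo_notifier = {
    	.notifier_call = demo_netdev_event,
    };

    static int __init demo_init(void)
    {
    	/* also replays REGISTER/UP events for already-present devices */
    	return register_netdevice_notifier(&demo_notifier);
    }

    static void __exit demo_exit(void)
    {
    	unregister_netdevice_notifier(&demo_notifier);
    }

    module_init(demo_init);
    module_exit(demo_exit);
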
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
new file mode 100644
index 0000000..5192d4a
--- /dev/null
@@ -0,0 +1,299 @@
+/* cnic.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_H
+#define CNIC_H
+
+#define KWQ_PAGE_CNT   4
+#define KCQ_PAGE_CNT   16
+
+#define KWQ_CID                24
+#define KCQ_CID                25
+
+/*
+ *     krnlq_context definition
+ */
+#define L5_KRNLQ_FLAGS 0x00000000
+#define L5_KRNLQ_SIZE  0x00000000
+#define L5_KRNLQ_TYPE  0x00000000
+#define KRNLQ_FLAGS_PG_SZ                                      (0xf<<0)
+#define KRNLQ_FLAGS_PG_SZ_256                                  (0<<0)
+#define KRNLQ_FLAGS_PG_SZ_512                                  (1<<0)
+#define KRNLQ_FLAGS_PG_SZ_1K                                   (2<<0)
+#define KRNLQ_FLAGS_PG_SZ_2K                                   (3<<0)
+#define KRNLQ_FLAGS_PG_SZ_4K                                   (4<<0)
+#define KRNLQ_FLAGS_PG_SZ_8K                                   (5<<0)
+#define KRNLQ_FLAGS_PG_SZ_16K                                  (6<<0)
+#define KRNLQ_FLAGS_PG_SZ_32K                                  (7<<0)
+#define KRNLQ_FLAGS_PG_SZ_64K                                  (8<<0)
+#define KRNLQ_FLAGS_PG_SZ_128K                                 (9<<0)
+#define KRNLQ_FLAGS_PG_SZ_256K                                 (10<<0)
+#define KRNLQ_FLAGS_PG_SZ_512K                                 (11<<0)
+#define KRNLQ_FLAGS_PG_SZ_1M                                   (12<<0)
+#define KRNLQ_FLAGS_PG_SZ_2M                                   (13<<0)
+#define KRNLQ_FLAGS_QE_SELF_SEQ                                        (1<<15)
+#define KRNLQ_SIZE_TYPE_SIZE   ((((0x28 + 0x1f) & ~0x1f) / 0x20) << 16)
+#define KRNLQ_TYPE_TYPE                                                (0xf<<28)
+#define KRNLQ_TYPE_TYPE_EMPTY                                  (0<<28)
+#define KRNLQ_TYPE_TYPE_KRNLQ                                  (6<<28)
+
+#define L5_KRNLQ_HOST_QIDX             0x00000004
+#define L5_KRNLQ_HOST_FW_QIDX          0x00000008
+#define L5_KRNLQ_NX_QE_SELF_SEQ        0x0000000c
+#define L5_KRNLQ_QE_SELF_SEQ_MAX       0x0000000c
+#define L5_KRNLQ_NX_QE_HADDR_HI        0x00000010
+#define L5_KRNLQ_NX_QE_HADDR_LO        0x00000014
+#define L5_KRNLQ_PGTBL_PGIDX           0x00000018
+#define L5_KRNLQ_NX_PG_QIDX            0x00000018
+#define L5_KRNLQ_PGTBL_NPAGES          0x0000001c
+#define L5_KRNLQ_QIDX_INCR             0x0000001c
+#define L5_KRNLQ_PGTBL_HADDR_HI        0x00000020
+#define L5_KRNLQ_PGTBL_HADDR_LO        0x00000024
+
+#define BNX2_PG_CTX_MAP                        0x1a0034
+#define BNX2_ISCSI_CTX_MAP             0x1a0074
+
+struct cnic_redirect_entry {
+       struct dst_entry *old_dst;
+       struct dst_entry *new_dst;
+};
+
+#define MAX_COMPLETED_KCQE     64
+
+#define MAX_CNIC_L5_CONTEXT    256
+
+#define MAX_CM_SK_TBL_SZ       MAX_CNIC_L5_CONTEXT
+
+#define MAX_ISCSI_TBL_SZ       256
+
+#define CNIC_LOCAL_PORT_MIN    60000
+#define CNIC_LOCAL_PORT_MAX    61000
+#define CNIC_LOCAL_PORT_RANGE  (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
+
+#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
+#define KCQE_CNT (BCM_PAGE_SIZE / sizeof(struct kcqe))
+#define MAX_KWQE_CNT (KWQE_CNT - 1)
+#define MAX_KCQE_CNT (KCQE_CNT - 1)
+
+#define MAX_KWQ_IDX    ((KWQ_PAGE_CNT * KWQE_CNT) - 1)
+#define MAX_KCQ_IDX    ((KCQ_PAGE_CNT * KCQE_CNT) - 1)
+
+#define KWQ_PG(x) (((x) & ~MAX_KWQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KWQ_IDX(x) ((x) & MAX_KWQE_CNT)
+
+#define KCQ_PG(x) (((x) & ~MAX_KCQE_CNT) >> (BCM_PAGE_BITS - 5))
+#define KCQ_IDX(x) ((x) & MAX_KCQE_CNT)
+
+#define BNX2X_NEXT_KCQE(x) (((x) & (MAX_KCQE_CNT - 1)) ==              \
+               (MAX_KCQE_CNT - 1)) ?                                   \
+               (x) + 2 : (x) + 1
+
+#define BNX2X_KWQ_DATA_PG(cp, x) ((x) / (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA_IDX(cp, x) ((x) % (cp)->kwq_16_data_pp)
+#define BNX2X_KWQ_DATA(cp, x)                                          \
+       &(cp)->kwq_16_data[BNX2X_KWQ_DATA_PG(cp, x)][BNX2X_KWQ_DATA_IDX(cp, x)]
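
/*
 * Editor's worked example for the ring-index macros above (assuming
 * BCM_PAGE_SIZE = 4096 and BCM_PAGE_BITS = 12, which come from bnx2.h and
 * are not shown in this patch).  sizeof(struct kwqe) is 32 bytes, so:
 *
 *	KWQE_CNT     = 4096 / 32 = 128,  MAX_KWQE_CNT = 127
 *	KWQ_PG(200)  = (200 & ~127) >> (12 - 5) = 128 >> 7 = 1  (page 1)
 *	KWQ_IDX(200) = 200 & 127                = 72            (slot 72)
 *
 * i.e. absolute producer index 200 maps to entry 72 of ring page 1; the
 * "- 5" is log2 of the 32-byte KWQE size.
 */
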
+
+#define DEF_IPID_COUNT         0xc001
+
+#define DEF_KA_TIMEOUT         10000
+#define DEF_KA_INTERVAL                300000
+#define DEF_KA_MAX_PROBE_COUNT 3
+#define DEF_TOS                        0
+#define DEF_TTL                        0xfe
+#define DEF_SND_SEQ_SCALE      0
+#define DEF_RCV_BUF            0xffff
+#define DEF_SND_BUF            0xffff
+#define DEF_SEED               0
+#define DEF_MAX_RT_TIME                500
+#define DEF_MAX_DA_COUNT       2
+#define DEF_SWS_TIMER          1000
+#define DEF_MAX_CWND           0xffff
+
+struct cnic_ctx {
+       u32             cid;
+       void            *ctx;
+       dma_addr_t      mapping;
+};
+
+#define BNX2_MAX_CID           0x2000
+
+struct cnic_dma {
+       int             num_pages;
+       void            **pg_arr;
+       dma_addr_t      *pg_map_arr;
+       int             pgtbl_size;
+       u32             *pgtbl;
+       dma_addr_t      pgtbl_map;
+};
+
+struct cnic_id_tbl {
+       spinlock_t      lock;
+       u32             start;
+       u32             max;
+       u32             next;
+       unsigned long   *table;
+};
+
+#define CNIC_KWQ16_DATA_SIZE   128
+
+struct kwqe_16_data {
+       u8      data[CNIC_KWQ16_DATA_SIZE];
+};
+
+struct cnic_iscsi {
+       struct cnic_dma         task_array_info;
+       struct cnic_dma         r2tq_info;
+       struct cnic_dma         hq_info;
+};
+
+struct cnic_context {
+       u32                     cid;
+       struct kwqe_16_data     *kwqe_data;
+       dma_addr_t              kwqe_data_mapping;
+       wait_queue_head_t       waitq;
+       int                     wait_cond;
+       unsigned long           timestamp;
+       u32                     ctx_flags;
+#define        CTX_FL_OFFLD_START      0x00000001
+       u8                      ulp_proto_id;
+       union {
+               struct cnic_iscsi       *iscsi;
+       } proto;
+};
+
+struct cnic_local {
+
+       spinlock_t cnic_ulp_lock;
+       void *ulp_handle[MAX_CNIC_ULP_TYPE];
+       unsigned long ulp_flags[MAX_CNIC_ULP_TYPE];
+#define ULP_F_INIT     0
+#define ULP_F_START    1
+       struct cnic_ulp_ops *ulp_ops[MAX_CNIC_ULP_TYPE];
+
+       /* protected by ulp_lock */
+       u32 cnic_local_flags;
+#define        CNIC_LCL_FL_KWQ_INIT    0x00000001
+
+       struct cnic_dev *dev;
+
+       struct cnic_eth_dev *ethdev;
+
+       void            *l2_ring;
+       dma_addr_t      l2_ring_map;
+       int             l2_ring_size;
+       int             l2_rx_ring_size;
+
+       void            *l2_buf;
+       dma_addr_t      l2_buf_map;
+       int             l2_buf_size;
+       int             l2_single_buf_size;
+
+       u16             *rx_cons_ptr;
+       u16             *tx_cons_ptr;
+       u16             rx_cons;
+       u16             tx_cons;
+
+       u32 kwq_cid_addr;
+       u32 kcq_cid_addr;
+
+       struct cnic_dma         kwq_info;
+       struct kwqe             **kwq;
+
+       struct cnic_dma         kwq_16_data_info;
+
+       u16             max_kwq_idx;
+
+       u16             kwq_prod_idx;
+       u32             kwq_io_addr;
+
+       u16             *kwq_con_idx_ptr;
+       u16             kwq_con_idx;
+
+       struct cnic_dma kcq_info;
+       struct kcqe     **kcq;
+
+       u16             kcq_prod_idx;
+       u32             kcq_io_addr;
+
+       void                            *status_blk;
+       struct status_block_msix        *bnx2_status_blk;
+       struct host_status_block        *bnx2x_status_blk;
+
+       u32                             status_blk_num;
+       u32                             int_num;
+       u32                             last_status_idx;
+       struct tasklet_struct           cnic_irq_task;
+
+       struct kcqe             *completed_kcq[MAX_COMPLETED_KCQE];
+
+       struct cnic_sock        *csk_tbl;
+       struct cnic_id_tbl      csk_port_tbl;
+
+       struct cnic_dma         conn_buf_info;
+       struct cnic_dma         gbl_buf_info;
+
+       struct cnic_iscsi       *iscsi_tbl;
+       struct cnic_context     *ctx_tbl;
+       struct cnic_id_tbl      cid_tbl;
+       int                     max_iscsi_conn;
+       atomic_t                iscsi_conn;
+
+       /* per connection parameters */
+       int                     num_iscsi_tasks;
+       int                     num_ccells;
+       int                     task_array_size;
+       int                     r2tq_size;
+       int                     hq_size;
+       int                     num_cqs;
+
+       struct cnic_ctx         *ctx_arr;
+       int                     ctx_blks;
+       int                     ctx_blk_size;
+       int                     cids_per_blk;
+
+       u32                     chip_id;
+       int                     func;
+       u32                     shmem_base;
+
+       u32                     uio_dev;
+       struct uio_info         *cnic_uinfo;
+
+       struct cnic_ops         *cnic_ops;
+       int                     (*start_hw)(struct cnic_dev *);
+       void                    (*stop_hw)(struct cnic_dev *);
+       void                    (*setup_pgtbl)(struct cnic_dev *,
+                                              struct cnic_dma *);
+       int                     (*alloc_resc)(struct cnic_dev *);
+       void                    (*free_resc)(struct cnic_dev *);
+       int                     (*start_cm)(struct cnic_dev *);
+       void                    (*stop_cm)(struct cnic_dev *);
+       void                    (*enable_int)(struct cnic_dev *);
+       void                    (*disable_int_sync)(struct cnic_dev *);
+       void                    (*ack_int)(struct cnic_dev *);
+       void                    (*close_conn)(struct cnic_sock *, u32 opcode);
+       u16                     (*next_idx)(u16);
+       u16                     (*hw_idx)(u16);
+};
+
+struct bnx2x_bd_chain_next {
+       u32     addr_lo;
+       u32     addr_hi;
+       u8      reserved[8];
+};
+
+#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN                (ISCSI_KCQE_OPCODE_UPDATE_CONN)
+#define ISCSI_RAMROD_CMD_ID_INIT               (ISCSI_KCQE_OPCODE_INIT)
+
+#define CDU_REGION_NUMBER_XCM_AG 2
+#define CDU_REGION_NUMBER_UCM_AG 4
+
+#endif
+
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
new file mode 100644
index 0000000..cee80f6
--- /dev/null
@@ -0,0 +1,580 @@
+
+/* cnic_defs.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006-2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+#ifndef CNIC_DEFS_H
+#define CNIC_DEFS_H
+
+/* KWQ (kernel work queue) request op codes */
+#define L2_KWQE_OPCODE_VALUE_FLUSH                  (4)
+
+#define L4_KWQE_OPCODE_VALUE_CONNECT1               (50)
+#define L4_KWQE_OPCODE_VALUE_CONNECT2               (51)
+#define L4_KWQE_OPCODE_VALUE_CONNECT3               (52)
+#define L4_KWQE_OPCODE_VALUE_RESET                  (53)
+#define L4_KWQE_OPCODE_VALUE_CLOSE                  (54)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_SECRET          (60)
+#define L4_KWQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KWQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KWQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KWQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+#define L5CM_RAMROD_CMD_ID_BASE                        (0x80)
+#define L5CM_RAMROD_CMD_ID_TCP_CONNECT         (L5CM_RAMROD_CMD_ID_BASE + 3)
+#define L5CM_RAMROD_CMD_ID_CLOSE               (L5CM_RAMROD_CMD_ID_BASE + 12)
+#define L5CM_RAMROD_CMD_ID_ABORT               (L5CM_RAMROD_CMD_ID_BASE + 13)
+#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE     (L5CM_RAMROD_CMD_ID_BASE + 14)
+#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD   (L5CM_RAMROD_CMD_ID_BASE + 15)
+
+/* KCQ (kernel completion queue) response op codes */
+#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP             (53)
+#define L4_KCQE_OPCODE_VALUE_RESET_COMP             (54)
+#define L4_KCQE_OPCODE_VALUE_FW_TCP_UPDATE          (55)
+#define L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE       (56)
+#define L4_KCQE_OPCODE_VALUE_RESET_RECEIVED         (57)
+#define L4_KCQE_OPCODE_VALUE_CLOSE_RECEIVED         (58)
+#define L4_KCQE_OPCODE_VALUE_INIT_ULP               (61)
+
+#define L4_KCQE_OPCODE_VALUE_OFFLOAD_PG             (1)
+#define L4_KCQE_OPCODE_VALUE_UPDATE_PG              (9)
+#define L4_KCQE_OPCODE_VALUE_UPLOAD_PG              (14)
+
+/* KCQ (kernel completion queue) completion status */
+#define L4_KCQE_COMPLETION_STATUS_SUCCESS                  (0)
+#define L4_KCQE_COMPLETION_STATUS_TIMEOUT        (0x93)
+
+#define L4_LAYER_CODE (4)
+#define L2_LAYER_CODE (2)
+
+/*
+ * L4 KCQ CQE
+ */
+struct l4_kcq {
+       u32 cid;
+       u32 pg_cid;
+       u32 conn_id;
+       u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+       u16 status;
+       u16 reserved1;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved1;
+       u16 status;
+#endif
+       u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+       u8 op_code;
+       u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+       u16 qe_self_seq;
+       u8 op_code;
+       u8 flags;
+#define L4_KCQ_RESERVED3 (0x7<<0)
+#define L4_KCQ_RESERVED3_SHIFT 0
+#define L4_KCQ_RAMROD_COMPLETION (0x1<<3) /* Everest only */
+#define L4_KCQ_RAMROD_COMPLETION_SHIFT 3
+#define L4_KCQ_LAYER_CODE (0x7<<4)
+#define L4_KCQ_LAYER_CODE_SHIFT 4
+#define L4_KCQ_RESERVED4 (0x1<<7)
+#define L4_KCQ_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * L4 KCQ CQE PG upload
+ */
+struct l4_kcq_upload_pg {
+       u32 pg_cid;
+#if defined(__BIG_ENDIAN)
+       u16 pg_status;
+       u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+       u16 pg_ipid_count;
+       u16 pg_status;
+#endif
+       u32 reserved1[5];
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+       u8 op_code;
+       u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+       u16 qe_self_seq;
+       u8 op_code;
+       u8 flags;
+#define L4_KCQ_UPLOAD_PG_RESERVED3 (0xF<<0)
+#define L4_KCQ_UPLOAD_PG_RESERVED3_SHIFT 0
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KCQ_UPLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KCQ_UPLOAD_PG_RESERVED4 (0x1<<7)
+#define L4_KCQ_UPLOAD_PG_RESERVED4_SHIFT 7
+#endif
+};
+
+
+/*
+ * Gracefully close the connection request
+ */
+struct l4_kwq_close_req {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+       u8 op_code;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_code;
+       u8 flags;
+#define L4_KWQ_CLOSE_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_CLOSE_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CLOSE_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CLOSE_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 cid;
+       u32 reserved2[6];
+};
+
+
+/*
+ * The first request to be passed in order to establish connection in option2
+ */
+struct l4_kwq_connect_req1 {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+       u8 op_code;
+       u8 reserved0;
+       u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+       u8 conn_flags;
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_IS_PG_HOST_OPAQUE_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_IP_V6 (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_IP_V6_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_PASSIVE_FLAG_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_RSRV (0x1F<<3)
+#define L4_KWQ_CONNECT_REQ1_RSRV_SHIFT 3
+       u8 reserved0;
+       u8 op_code;
+       u8 flags;
+#define L4_KWQ_CONNECT_REQ1_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ1_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ1_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ1_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 cid;
+       u32 pg_cid;
+       u32 src_ip;
+       u32 dst_ip;
+#if defined(__BIG_ENDIAN)
+       u16 dst_port;
+       u16 src_port;
+#elif defined(__LITTLE_ENDIAN)
+       u16 src_port;
+       u16 dst_port;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 rsrv1[3];
+       u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+#elif defined(__LITTLE_ENDIAN)
+       u8 tcp_flags;
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK (0x1<<0)
+#define L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK_SHIFT 0
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE (0x1<<1)
+#define L4_KWQ_CONNECT_REQ1_KEEP_ALIVE_SHIFT 1
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE (0x1<<2)
+#define L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE_SHIFT 2
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP (0x1<<3)
+#define L4_KWQ_CONNECT_REQ1_TIME_STAMP_SHIFT 3
+#define L4_KWQ_CONNECT_REQ1_SACK (0x1<<4)
+#define L4_KWQ_CONNECT_REQ1_SACK_SHIFT 4
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING (0x1<<5)
+#define L4_KWQ_CONNECT_REQ1_SEG_SCALING_SHIFT 5
+#define L4_KWQ_CONNECT_REQ1_RESERVED2 (0x3<<6)
+#define L4_KWQ_CONNECT_REQ1_RESERVED2_SHIFT 6
+       u8 rsrv1[3];
+#endif
+       u32 rsrv2;
+};
+
+
+/*
+ * The second (optional) request to be passed in order to establish
+ * connection in option2 - for IPv6 only
+ */
+struct l4_kwq_connect_req2 {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+       u8 op_code;
+       u8 reserved0;
+       u8 rsrv;
+#elif defined(__LITTLE_ENDIAN)
+       u8 rsrv;
+       u8 reserved0;
+       u8 op_code;
+       u8 flags;
+#define L4_KWQ_CONNECT_REQ2_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ2_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ2_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ2_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 reserved2;
+       u32 src_ip_v6_2;
+       u32 src_ip_v6_3;
+       u32 src_ip_v6_4;
+       u32 dst_ip_v6_2;
+       u32 dst_ip_v6_3;
+       u32 dst_ip_v6_4;
+};
+
+
+/*
+ * The third (and last) request to be passed in order to establish
+ * connection in option2
+ */
+struct l4_kwq_connect_req3 {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+       u8 op_code;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_code;
+       u8 flags;
+#define L4_KWQ_CONNECT_REQ3_RESERVED1 (0xF<<0)
+#define L4_KWQ_CONNECT_REQ3_RESERVED1_SHIFT 0
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE (0x7<<4)
+#define L4_KWQ_CONNECT_REQ3_LAYER_CODE_SHIFT 4
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_CONNECT_REQ3_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 ka_timeout;
+       u32 ka_interval;
+#if defined(__BIG_ENDIAN)
+       u8 snd_seq_scale;
+       u8 ttl;
+       u8 tos;
+       u8 ka_max_probe_count;
+#elif defined(__LITTLE_ENDIAN)
+       u8 ka_max_probe_count;
+       u8 tos;
+       u8 ttl;
+       u8 snd_seq_scale;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 pmtu;
+       u16 mss;
+#elif defined(__LITTLE_ENDIAN)
+       u16 mss;
+       u16 pmtu;
+#endif
+       u32 rcv_buf;
+       u32 snd_buf;
+       u32 seed;
+};
+
+
+/*
+ * a KWQE request to offload a PG connection
+ */
+struct l4_kwq_offload_pg {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+       u8 op_code;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_code;
+       u8 flags;
+#define L4_KWQ_OFFLOAD_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_OFFLOAD_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_OFFLOAD_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_OFFLOAD_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 l2hdr_nbytes;
+       u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+       u8 da0;
+       u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+       u8 da1;
+       u8 da0;
+       u8 pg_flags;
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP (0x1<<0)
+#define L4_KWQ_OFFLOAD_PG_SNAP_ENCAP_SHIFT 0
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING (0x1<<1)
+#define L4_KWQ_OFFLOAD_PG_VLAN_TAGGING_SHIFT 1
+#define L4_KWQ_OFFLOAD_PG_RESERVED2 (0x3F<<2)
+#define L4_KWQ_OFFLOAD_PG_RESERVED2_SHIFT 2
+       u8 l2hdr_nbytes;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 da2;
+       u8 da3;
+       u8 da4;
+       u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+       u8 da5;
+       u8 da4;
+       u8 da3;
+       u8 da2;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 sa0;
+       u8 sa1;
+       u8 sa2;
+       u8 sa3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 sa3;
+       u8 sa2;
+       u8 sa1;
+       u8 sa0;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 sa4;
+       u8 sa5;
+       u16 etype;
+#elif defined(__LITTLE_ENDIAN)
+       u16 etype;
+       u8 sa5;
+       u8 sa4;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 vlan_tag;
+       u16 ipid_start;
+#elif defined(__LITTLE_ENDIAN)
+       u16 ipid_start;
+       u16 vlan_tag;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 ipid_count;
+       u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved3;
+       u16 ipid_count;
+#endif
+       u32 host_opaque;
+};
+
+
+/*
+ * Abortively close the connection request
+ */
+struct l4_kwq_reset_req {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+       u8 op_code;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_code;
+       u8 flags;
+#define L4_KWQ_RESET_REQ_RESERVED1 (0xF<<0)
+#define L4_KWQ_RESET_REQ_RESERVED1_SHIFT 0
+#define L4_KWQ_RESET_REQ_LAYER_CODE (0x7<<4)
+#define L4_KWQ_RESET_REQ_LAYER_CODE_SHIFT 4
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_RESET_REQ_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 cid;
+       u32 reserved2[6];
+};
+
+
+/*
+ * a KWQE request to update a PG connection
+ */
+struct l4_kwq_update_pg {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+       u8 opcode;
+       u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+       u16 oper16;
+       u8 opcode;
+       u8 flags;
+#define L4_KWQ_UPDATE_PG_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPDATE_PG_RESERVED1_SHIFT 0
+#define L4_KWQ_UPDATE_PG_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPDATE_PG_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPDATE_PG_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 pg_cid;
+       u32 pg_host_opaque;
+#if defined(__BIG_ENDIAN)
+       u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+       u8 pg_unused_a;
+       u16 pg_ipid_count;
+#elif defined(__LITTLE_ENDIAN)
+       u16 pg_ipid_count;
+       u8 pg_unused_a;
+       u8 pg_valids;
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT (0x1<<0)
+#define L4_KWQ_UPDATE_PG_VALIDS_IPID_COUNT_SHIFT 0
+#define L4_KWQ_UPDATE_PG_VALIDS_DA (0x1<<1)
+#define L4_KWQ_UPDATE_PG_VALIDS_DA_SHIFT 1
+#define L4_KWQ_UPDATE_PG_RESERVERD2 (0x3F<<2)
+#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 reserverd3;
+       u8 da0;
+       u8 da1;
+#elif defined(__LITTLE_ENDIAN)
+       u8 da1;
+       u8 da0;
+       u16 reserverd3;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 da2;
+       u8 da3;
+       u8 da4;
+       u8 da5;
+#elif defined(__LITTLE_ENDIAN)
+       u8 da5;
+       u8 da4;
+       u8 da3;
+       u8 da2;
+#endif
+       u32 reserved4;
+       u32 reserved5;
+};
+
+
+/*
+ * a KWQE request to upload a PG or L4 context
+ */
+struct l4_kwq_upload {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+       u8 opcode;
+       u16 oper16;
+#elif defined(__LITTLE_ENDIAN)
+       u16 oper16;
+       u8 opcode;
+       u8 flags;
+#define L4_KWQ_UPLOAD_RESERVED1 (0xF<<0)
+#define L4_KWQ_UPLOAD_RESERVED1_SHIFT 0
+#define L4_KWQ_UPLOAD_LAYER_CODE (0x7<<4)
+#define L4_KWQ_UPLOAD_LAYER_CODE_SHIFT 4
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT (0x1<<7)
+#define L4_KWQ_UPLOAD_LINKED_WITH_NEXT_SHIFT 7
+#endif
+       u32 cid;
+       u32 reserved2[6];
+};
+
+#endif /* CNIC_DEFS_H */
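
Editor's note: every structure in this header mirrors its sub-u32 members
under __BIG_ENDIAN/__LITTLE_ENDIAN so that the byte layout seen by the chip
is identical on both host types, which lets the same bitmasks work
unchanged.  A hypothetical accessor illustrating this (not part of the
patch):

    static inline u8 l4_kcq_layer_code(const struct l4_kcq *kcq)
    {
    	/* the same expression is correct on either endianness */
    	return (kcq->flags & L4_KCQ_LAYER_CODE) >> L4_KCQ_LAYER_CODE_SHIFT;
    }
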
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
new file mode 100644
index 0000000..0638096
--- /dev/null
@@ -0,0 +1,299 @@
+/* cnic_if.h: Broadcom CNIC core network driver.
+ *
+ * Copyright (c) 2006 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ */
+
+
+#ifndef CNIC_IF_H
+#define CNIC_IF_H
+
+#define CNIC_MODULE_VERSION    "2.0.0"
+#define CNIC_MODULE_RELDATE    "May 21, 2009"
+
+#define CNIC_ULP_RDMA          0
+#define CNIC_ULP_ISCSI         1
+#define CNIC_ULP_L4            2
+#define MAX_CNIC_ULP_TYPE_EXT  2
+#define MAX_CNIC_ULP_TYPE      3
+
+struct kwqe {
+       u32 kwqe_op_flag;
+
+#define KWQE_OPCODE_MASK       0x00ff0000
+#define KWQE_OPCODE_SHIFT      16
+#define KWQE_FLAGS_LAYER_SHIFT 28
+#define KWQE_OPCODE(x)         ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
+
+       u32 kwqe_info0;
+       u32 kwqe_info1;
+       u32 kwqe_info2;
+       u32 kwqe_info3;
+       u32 kwqe_info4;
+       u32 kwqe_info5;
+       u32 kwqe_info6;
+};
+
+struct kwqe_16 {
+       u32 kwqe_info0;
+       u32 kwqe_info1;
+       u32 kwqe_info2;
+       u32 kwqe_info3;
+};
+
+struct kcqe {
+       u32 kcqe_info0;
+       u32 kcqe_info1;
+       u32 kcqe_info2;
+       u32 kcqe_info3;
+       u32 kcqe_info4;
+       u32 kcqe_info5;
+       u32 kcqe_info6;
+       u32 kcqe_op_flag;
+               #define KCQE_RAMROD_COMPLETION          (0x1<<27) /* Everest */
+               #define KCQE_FLAGS_LAYER_MASK           (0x7<<28)
+               #define KCQE_FLAGS_LAYER_MASK_MISC      (0<<28)
+               #define KCQE_FLAGS_LAYER_MASK_L2        (2<<28)
+               #define KCQE_FLAGS_LAYER_MASK_L3        (3<<28)
+               #define KCQE_FLAGS_LAYER_MASK_L4        (4<<28)
+               #define KCQE_FLAGS_LAYER_MASK_L5_RDMA   (5<<28)
+               #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI  (6<<28)
+               #define KCQE_FLAGS_NEXT                 (1<<31)
+               #define KCQE_FLAGS_OPCODE_MASK          (0xff<<16)
+               #define KCQE_FLAGS_OPCODE_SHIFT         (16)
+               #define KCQE_OPCODE(op)                 \
+               (((op) & KCQE_FLAGS_OPCODE_MASK) >> KCQE_FLAGS_OPCODE_SHIFT)
+};
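
Editor's note: a completion handler typically classifies a KCQE by layer
before decoding its opcode.  A hypothetical check of the kind an iSCSI ULP
would perform (not part of this patch):

    static inline int demo_kcqe_is_l5_iscsi(const struct kcqe *kcqe)
    {
    	return (kcqe->kcqe_op_flag & KCQE_FLAGS_LAYER_MASK) ==
    	       KCQE_FLAGS_LAYER_MASK_L5_ISCSI;
    }
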
+
+#define MAX_CNIC_CTL_DATA      64
+#define MAX_DRV_CTL_DATA       64
+
+#define CNIC_CTL_STOP_CMD              1
+#define CNIC_CTL_START_CMD             2
+#define CNIC_CTL_COMPLETION_CMD                3
+
+#define DRV_CTL_IO_WR_CMD              0x101
+#define DRV_CTL_IO_RD_CMD              0x102
+#define DRV_CTL_CTX_WR_CMD             0x103
+#define DRV_CTL_CTXTBL_WR_CMD          0x104
+#define DRV_CTL_COMPLETION_CMD         0x105
+
+struct cnic_ctl_completion {
+       u32     cid;
+};
+
+struct drv_ctl_completion {
+       u32     comp_count;
+};
+
+struct cnic_ctl_info {
+       int     cmd;
+       union {
+               struct cnic_ctl_completion comp;
+               char bytes[MAX_CNIC_CTL_DATA];
+       } data;
+};
+
+struct drv_ctl_io {
+       u32             cid_addr;
+       u32             offset;
+       u32             data;
+       dma_addr_t      dma_addr;
+};
+
+struct drv_ctl_info {
+       int     cmd;
+       union {
+               struct drv_ctl_completion comp;
+               struct drv_ctl_io io;
+               char bytes[MAX_DRV_CTL_DATA];
+       } data;
+};
+
+struct cnic_ops {
+       struct module   *cnic_owner;
+       /* Calls to these functions are protected by RCU.  When
+        * unregistering, we wait for any calls to complete before
+        * continuing.
+        */
+       int             (*cnic_handler)(void *, void *);
+       int             (*cnic_ctl)(void *, struct cnic_ctl_info *);
+};
+
+#define MAX_CNIC_VEC   8
+
+struct cnic_irq {
+       unsigned int    vector;
+       void            *status_blk;
+       u32             status_blk_num;
+       u32             irq_flags;
+#define CNIC_IRQ_FL_MSIX               0x00000001
+};
+
+struct cnic_eth_dev {
+       struct module   *drv_owner;
+       u32             drv_state;
+#define CNIC_DRV_STATE_REGD            0x00000001
+#define CNIC_DRV_STATE_USING_MSIX      0x00000002
+       u32             chip_id;
+       u32             max_kwqe_pending;
+       struct pci_dev  *pdev;
+       void __iomem    *io_base;
+
+       u32             ctx_tbl_offset;
+       u32             ctx_tbl_len;
+       int             ctx_blk_size;
+       u32             starting_cid;
+       u32             max_iscsi_conn;
+       u32             max_fcoe_conn;
+       u32             max_rdma_conn;
+       u32             reserved0[2];
+
+       int             num_irq;
+       struct cnic_irq irq_arr[MAX_CNIC_VEC];
+       int             (*drv_register_cnic)(struct net_device *,
+                                            struct cnic_ops *, void *);
+       int             (*drv_unregister_cnic)(struct net_device *);
+       int             (*drv_submit_kwqes_32)(struct net_device *,
+                                              struct kwqe *[], u32);
+       int             (*drv_submit_kwqes_16)(struct net_device *,
+                                              struct kwqe_16 *[], u32);
+       int             (*drv_ctl)(struct net_device *, struct drv_ctl_info *);
+       unsigned long   reserved1[2];
+};
+
+struct cnic_sockaddr {
+       union {
+               struct sockaddr_in      v4;
+               struct sockaddr_in6     v6;
+       } local;
+       union {
+               struct sockaddr_in      v4;
+               struct sockaddr_in6     v6;
+       } remote;
+};
+
+struct cnic_sock {
+       struct cnic_dev *dev;
+       void    *context;
+       u32     src_ip[4];
+       u32     dst_ip[4];
+       u16     src_port;
+       u16     dst_port;
+       u16     vlan_id;
+       unsigned char old_ha[6];
+       unsigned char ha[6];
+       u32     mtu;
+       u32     cid;
+       u32     l5_cid;
+       u32     pg_cid;
+       int     ulp_type;
+
+       u32     ka_timeout;
+       u32     ka_interval;
+       u8      ka_max_probe_count;
+       u8      tos;
+       u8      ttl;
+       u8      snd_seq_scale;
+       u32     rcv_buf;
+       u32     snd_buf;
+       u32     seed;
+
+       unsigned long   tcp_flags;
+#define SK_TCP_NO_DELAY_ACK    0x1
+#define SK_TCP_KEEP_ALIVE      0x2
+#define SK_TCP_NAGLE           0x4
+#define SK_TCP_TIMESTAMP       0x8
+#define SK_TCP_SACK            0x10
+#define SK_TCP_SEG_SCALING     0x20
+       unsigned long   flags;
+#define SK_F_INUSE             0
+#define SK_F_OFFLD_COMPLETE    1
+#define SK_F_OFFLD_SCHED       2
+#define SK_F_PG_OFFLD_COMPLETE 3
+#define SK_F_CONNECT_START     4
+#define SK_F_IPV6              5
+#define SK_F_CLOSING           7
+
+       atomic_t ref_count;
+       u32 state;
+       struct kwqe kwqe1;
+       struct kwqe kwqe2;
+       struct kwqe kwqe3;
+};
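
Editor's note: mind the two conventions in struct cnic_sock above --
tcp_flags holds SK_TCP_* mask values tested with a bitwise AND, while flags
holds SK_F_* bit numbers meant for the atomic bitops.  A hypothetical
illustration (not part of this patch):

    static inline bool demo_csk_ka_while_connecting(struct cnic_sock *csk)
    {
    	return (csk->tcp_flags & SK_TCP_KEEP_ALIVE) &&    /* mask value */
    	       test_bit(SK_F_CONNECT_START, &csk->flags); /* bit number */
    }
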
+
+struct cnic_dev {
+       struct net_device       *netdev;
+       struct pci_dev          *pcidev;
+       void __iomem            *regview;
+       struct list_head        list;
+
+       int (*register_device)(struct cnic_dev *dev, int ulp_type,
+                              void *ulp_ctx);
+       int (*unregister_device)(struct cnic_dev *dev, int ulp_type);
+       int (*submit_kwqes)(struct cnic_dev *dev, struct kwqe *wqes[],
+                               u32 num_wqes);
+       int (*submit_kwqes_16)(struct cnic_dev *dev, struct kwqe_16 *wqes[],
+                               u32 num_wqes);
+
+       int (*cm_create)(struct cnic_dev *, int, u32, u32, struct cnic_sock **,
+                        void *);
+       int (*cm_destroy)(struct cnic_sock *);
+       int (*cm_connect)(struct cnic_sock *, struct cnic_sockaddr *);
+       int (*cm_abort)(struct cnic_sock *);
+       int (*cm_close)(struct cnic_sock *);
+       struct cnic_dev *(*cm_select_dev)(struct sockaddr_in *, int ulp_type);
+       int (*iscsi_nl_msg_recv)(struct cnic_dev *dev, u32 msg_type,
+                                char *data, u16 data_size);
+       unsigned long   flags;
+#define CNIC_F_CNIC_UP         1
+#define CNIC_F_BNX2_CLASS      3
+#define CNIC_F_BNX2X_CLASS     4
+       atomic_t        ref_count;
+       u8              mac_addr[6];
+
+       int             max_iscsi_conn;
+       int             max_fcoe_conn;
+       int             max_rdma_conn;
+
+       void            *cnic_priv;
+};
+
+#define CNIC_WR(dev, off, val)         writel(val, dev->regview + off)
+#define CNIC_WR16(dev, off, val)       writew(val, dev->regview + off)
+#define CNIC_WR8(dev, off, val)                writeb(val, dev->regview + off)
+#define CNIC_RD(dev, off)              readl(dev->regview + off)
+#define CNIC_RD16(dev, off)            readw(dev->regview + off)
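
/*
 * Editor's note: these accessors are thin wrappers over readl()/writel()
 * and friends against the regview BAR mapping.  A plausible use, mirroring
 * the KWQ submit path elsewhere in this commit (hedged example, not quoted
 * from the patch), is ringing the work-queue doorbell:
 *
 *	CNIC_WR16(dev, cp->kwq_io_addr, cp->kwq_prod_idx);
 */
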
+
+struct cnic_ulp_ops {
+       /* Calls to these functions are protected by RCU.  When
+        * unregistering, we wait for any calls to complete before
+        * continuing.
+        */
+
+       void (*cnic_init)(struct cnic_dev *dev);
+       void (*cnic_exit)(struct cnic_dev *dev);
+       void (*cnic_start)(void *ulp_ctx);
+       void (*cnic_stop)(void *ulp_ctx);
+       void (*indicate_kcqes)(void *ulp_ctx, struct kcqe *cqes[],
+                               u32 num_cqes);
+       void (*indicate_netevent)(void *ulp_ctx, unsigned long event);
+       void (*cm_connect_complete)(struct cnic_sock *);
+       void (*cm_close_complete)(struct cnic_sock *);
+       void (*cm_abort_complete)(struct cnic_sock *);
+       void (*cm_remote_close)(struct cnic_sock *);
+       void (*cm_remote_abort)(struct cnic_sock *);
+       void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type,
+                                 char *data, u16 data_size);
+       struct module *owner;
+};
+
+extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
+
+extern int cnic_unregister_driver(int ulp_type);
+
+#endif
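
Editor's note: a minimal sketch of the consumer side of this interface; the
my_* names are hypothetical (bnx2i, added elsewhere in this commit, is the
real in-tree user):

    #include <linux/module.h>
    /* plus this header, cnic_if.h */

    static void my_cnic_init(struct cnic_dev *dev)
    {
    	/* called once for each cnic_dev after registration */
    }

    static void my_cnic_exit(struct cnic_dev *dev)
    {
    }

    static struct cnic_ulp_ops my_ulp_ops = {
    	.cnic_init = my_cnic_init,
    	.cnic_exit = my_cnic_exit,
    	.owner     = THIS_MODULE,
    };

    static int __init my_init(void)
    {
    	return cnic_register_driver(CNIC_ULP_ISCSI, &my_ulp_ops);
    }

    static void __exit my_exit(void)
    {
    	cnic_unregister_driver(CNIC_ULP_ISCSI);
    }

    module_init(my_init);
    module_exit(my_exit);
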
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 733fe3bf6285f0e20c6d6737b600beac7ec7332e..b2fe5cdbcaeec3cb4462b22040fa6282bd70c7b5 100644
 
 #include "zfcp_ext.h"
 
+#define ZFCP_MODEL_PRIV 0x4
+
+static struct ccw_device_id zfcp_ccw_device_id[] = {
+       { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
+       { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) },
+       {},
+};
+MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
+
+/**
+ * zfcp_ccw_priv_sch - check if subchannel is privileged
+ * @adapter: Adapter/Subchannel to check
+ */
+int zfcp_ccw_priv_sch(struct zfcp_adapter *adapter)
+{
+       return adapter->ccw_device->id.dev_model == ZFCP_MODEL_PRIV;
+}
+
 /**
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
@@ -176,8 +194,8 @@ static int zfcp_ccw_notify(struct ccw_device *ccw_device, int event)
                                        "ccnoti4", NULL);
                break;
        case CIO_BOXED:
-               dev_warn(&adapter->ccw_device->dev,
-                        "The ccw device did not respond in time.\n");
+               dev_warn(&adapter->ccw_device->dev, "The FCP device "
+                        "did not respond within the specified time\n");
                zfcp_erp_adapter_shutdown(adapter, 0, "ccnoti5", NULL);
                break;
        }
@@ -199,14 +217,6 @@ static void zfcp_ccw_shutdown(struct ccw_device *cdev)
        up(&zfcp_data.config_sema);
 }
 
-static struct ccw_device_id zfcp_ccw_device_id[] = {
-       { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) },
-       { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x4) }, /* priv. */
-       {},
-};
-
-MODULE_DEVICE_TABLE(ccw, zfcp_ccw_device_id);
-
 static struct ccw_driver zfcp_ccw_driver = {
        .owner       = THIS_MODULE,
        .name        = "zfcp",
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index 0a1a5dd8d01831b9e13986ba9fd553b7ffc55342..b99b87ce5a39618d8236167fd98b4db6e43a9a3b 100644
@@ -163,7 +163,7 @@ void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
        }
 
        response->fsf_command = fsf_req->fsf_command;
-       response->fsf_reqid = (unsigned long)fsf_req;
+       response->fsf_reqid = fsf_req->req_id;
        response->fsf_seqno = fsf_req->seq_no;
        response->fsf_issued = fsf_req->issued;
        response->fsf_prot_status = qtcb->prefix.prot_status;
@@ -737,7 +737,7 @@ void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
        spin_lock_irqsave(&adapter->san_dbf_lock, flags);
        memset(r, 0, sizeof(*r));
        strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
-       r->fsf_reqid = (unsigned long)fsf_req;
+       r->fsf_reqid = fsf_req->req_id;
        r->fsf_seqno = fsf_req->seq_no;
        r->s_id = fc_host_port_id(adapter->scsi_host);
        r->d_id = wka_port->d_id;
@@ -773,7 +773,7 @@ void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
        spin_lock_irqsave(&adapter->san_dbf_lock, flags);
        memset(r, 0, sizeof(*r));
        strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
-       r->fsf_reqid = (unsigned long)fsf_req;
+       r->fsf_reqid = fsf_req->req_id;
        r->fsf_seqno = fsf_req->seq_no;
        r->s_id = wka_port->d_id;
        r->d_id = fc_host_port_id(adapter->scsi_host);
@@ -803,7 +803,7 @@ static void zfcp_san_dbf_event_els(const char *tag, int level,
        spin_lock_irqsave(&adapter->san_dbf_lock, flags);
        memset(rec, 0, sizeof(*rec));
        strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
-       rec->fsf_reqid = (unsigned long)fsf_req;
+       rec->fsf_reqid = fsf_req->req_id;
        rec->fsf_seqno = fsf_req->seq_no;
        rec->s_id = s_id;
        rec->d_id = d_id;
@@ -965,7 +965,7 @@ static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
                                                      ZFCP_DBF_SCSI_FCP_SNS_INFO);
                                }
 
-                               rec->fsf_reqid = (unsigned long)fsf_req;
+                               rec->fsf_reqid = fsf_req->req_id;
                                rec->fsf_seqno = fsf_req->seq_no;
                                rec->fsf_issued = fsf_req->issued;
                        }
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 4c362a9069f07463c8db6869a86575099024300a..2074d45dbf6c4b1a426b5f9aafc39a11e1c41fe9 100644
 
 /********************* CIO/QDIO SPECIFIC DEFINES *****************************/
 
-/* Adapter Identification Parameters */
-#define ZFCP_CONTROL_UNIT_TYPE  0x1731
-#define ZFCP_CONTROL_UNIT_MODEL 0x03
-#define ZFCP_DEVICE_TYPE        0x1732
-#define ZFCP_DEVICE_MODEL       0x03
-#define ZFCP_DEVICE_MODEL_PRIV 0x04
-
 /* DMQ bug workaround: don't use last SBALE */
 #define ZFCP_MAX_SBALES_PER_SBAL       (QDIO_MAX_ELEMENTS_PER_BUFFER - 1)
 
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index fdc9b4352a6493e888ffb9ac4535619c2b379bc9..e50ea465bc2b993b44829a7d1554259e475fa2da 100644
@@ -880,6 +880,7 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
                                zfcp_port_put(port);
                        return ZFCP_ERP_CONTINUES;
                }
+               /* fall through */
        case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
                if (!port->d_id)
                        return ZFCP_ERP_FAILED;
@@ -894,8 +895,13 @@ static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
                                act->step = ZFCP_ERP_STEP_PORT_CLOSING;
                                return ZFCP_ERP_CONTINUES;
                        }
-               /* fall through otherwise */
                }
+               if (port->d_id && !(p_status & ZFCP_STATUS_COMMON_NOESC)) {
+                       port->d_id = 0;
+                       _zfcp_erp_port_reopen(port, 0, "erpsoc1", NULL);
+                       return ZFCP_ERP_EXIT;
+               }
+               /* fall through otherwise */
        }
        return ZFCP_ERP_FAILED;
 }
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 2e31b536548c2e28344831c3f4266f32c70c5c6b..120a9a1c81f7cf491e12b208d48c346ab5586780 100644
@@ -27,6 +27,7 @@ extern int zfcp_sg_setup_table(struct scatterlist *, int);
 
 /* zfcp_ccw.c */
 extern int zfcp_ccw_register(void);
+extern int zfcp_ccw_priv_sch(struct zfcp_adapter *);
 extern struct zfcp_adapter *zfcp_get_adapter_by_busid(char *);
 
 /* zfcp_cfdc.c */
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 19ae0842047c4750f0614479381c39b76d40d7ca..bb2752b4130fde32f21982248171b895e5f5c3cc 100644
@@ -150,9 +150,14 @@ static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
        struct zfcp_port *port;
 
        read_lock_irqsave(&zfcp_data.config_lock, flags);
-       list_for_each_entry(port, &fsf_req->adapter->port_list_head, list)
+       list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
                if ((port->d_id & range) == (elem->nport_did & range))
                        zfcp_test_link(port);
+               if (!port->d_id)
+                       zfcp_erp_port_reopen(port,
+                                            ZFCP_STATUS_COMMON_ERP_FAILED,
+                                            "fcrscn1", NULL);
+       }
 
        read_unlock_irqrestore(&zfcp_data.config_lock, flags);
 }
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 74dee32afba84619d564055909e26d41954e5ef3..e6dae3744e7981c348ddd554dbdf6dd27049c98f 100644
@@ -526,6 +526,7 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
                break;
        case FSF_TOPO_AL:
                fc_host_port_type(shost) = FC_PORTTYPE_NLPORT;
+               /* fall through */
        default:
                dev_err(&adapter->ccw_device->dev,
                        "Unknown or unsupported arbitrated loop "
@@ -897,6 +898,7 @@ static void zfcp_fsf_abort_fcp_command_handler(struct zfcp_fsf_req *req)
                switch (fsq->word[0]) {
                case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
                        zfcp_test_link(unit->port);
+                       /* fall through */
                case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
                        req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                        break;
@@ -993,6 +995,7 @@ static void zfcp_fsf_send_ct_handler(struct zfcp_fsf_req *req)
                break;
        case FSF_PORT_HANDLE_NOT_VALID:
                zfcp_erp_adapter_reopen(adapter, 0, "fsscth1", req);
+               /* fall through */
        case FSF_GENERIC_COMMAND_REJECTED:
        case FSF_PAYLOAD_SIZE_MISMATCH:
        case FSF_REQUEST_SIZE_TOO_LARGE:
@@ -1399,7 +1402,7 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
        struct fsf_plogi *plogi;
 
        if (req->status & ZFCP_STATUS_FSFREQ_ERROR)
-               return;
+               goto out;
 
        switch (header->fsf_status) {
        case FSF_PORT_ALREADY_OPEN:
@@ -1461,6 +1464,9 @@ static void zfcp_fsf_open_port_handler(struct zfcp_fsf_req *req)
                req->status |= ZFCP_STATUS_FSFREQ_ERROR;
                break;
        }
+
+out:
+       zfcp_port_put(port);
 }
 
 /**
@@ -1473,6 +1479,7 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
        struct qdio_buffer_element *sbale;
        struct zfcp_adapter *adapter = erp_action->adapter;
        struct zfcp_fsf_req *req;
+       struct zfcp_port *port = erp_action->port;
        int retval = -EIO;
 
        spin_lock_bh(&adapter->req_q_lock);
@@ -1493,16 +1500,18 @@ int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
        req->handler = zfcp_fsf_open_port_handler;
-       req->qtcb->bottom.support.d_id = erp_action->port->d_id;
-       req->data = erp_action->port;
+       req->qtcb->bottom.support.d_id = port->d_id;
+       req->data = port;
        req->erp_action = erp_action;
        erp_action->fsf_req = req;
+       zfcp_port_get(port);
 
        zfcp_fsf_start_erp_timer(req);
        retval = zfcp_fsf_req_send(req);
        if (retval) {
                zfcp_fsf_req_free(req);
                erp_action->fsf_req = NULL;
+               zfcp_port_put(port);
        }
 out:
        spin_unlock_bh(&adapter->req_q_lock);
@@ -1590,8 +1599,10 @@ static void zfcp_fsf_open_wka_port_handler(struct zfcp_fsf_req *req)
        case FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED:
                dev_warn(&req->adapter->ccw_device->dev,
                         "Opening WKA port 0x%x failed\n", wka_port->d_id);
+               /* fall through */
        case FSF_ADAPTER_STATUS_AVAILABLE:
                req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+               /* fall through */
        case FSF_ACCESS_DENIED:
                wka_port->status = ZFCP_WKA_PORT_OFFLINE;
                break;
@@ -1876,7 +1887,7 @@ static void zfcp_fsf_open_unit_handler(struct zfcp_fsf_req *req)
 
                if (!(adapter->connection_features & FSF_FEATURE_NPIV_MODE) &&
                    (adapter->adapter_features & FSF_FEATURE_LUN_SHARING) &&
-                   (adapter->ccw_device->id.dev_model != ZFCP_DEVICE_MODEL_PRIV)) {
+                   !zfcp_ccw_priv_sch(adapter)) {
                        exclusive = (bottom->lun_access_info &
                                        FSF_UNIT_ACCESS_EXCLUSIVE);
                        readwrite = (bottom->lun_access_info &
@@ -2314,7 +2325,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
 {
        struct zfcp_fsf_req *req;
        struct fcp_cmnd_iu *fcp_cmnd_iu;
-       unsigned int sbtype;
+       unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
        int real_bytes, retval = -EIO;
        struct zfcp_adapter *adapter = unit->port->adapter;
 
@@ -2356,11 +2367,9 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
        switch (scsi_cmnd->sc_data_direction) {
        case DMA_NONE:
                req->qtcb->bottom.io.data_direction = FSF_DATADIR_CMND;
-               sbtype = SBAL_FLAGS0_TYPE_READ;
                break;
        case DMA_FROM_DEVICE:
                req->qtcb->bottom.io.data_direction = FSF_DATADIR_READ;
-               sbtype = SBAL_FLAGS0_TYPE_READ;
                fcp_cmnd_iu->rddata = 1;
                break;
        case DMA_TO_DEVICE:
@@ -2369,8 +2378,6 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
                fcp_cmnd_iu->wddata = 1;
                break;
        case DMA_BIDIRECTIONAL:
-       default:
-               retval = -EIO;
                goto failed_scsi_cmnd;
        }
 
@@ -2394,9 +2401,7 @@ int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *unit,
                                             scsi_sglist(scsi_cmnd),
                                             FSF_MAX_SBALS_PER_REQ);
        if (unlikely(real_bytes < 0)) {
-               if (req->sbal_number < FSF_MAX_SBALS_PER_REQ)
-                       retval = -EIO;
-               else {
+               if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
                        dev_err(&adapter->ccw_device->dev,
                                "Oversize data package, unit 0x%016Lx "
                                "on port 0x%016Lx closed\n",
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index e8fbeaeb5fbfb4046d1a04ed3cab14e642bdce2a..7d0da230eb637ce92468f2d6565e1b04be4fdde3 100644
 #include "zfcp_ext.h"
 #include <asm/atomic.h>
 
+static unsigned int default_depth = 32;
+module_param_named(queue_depth, default_depth, uint, 0600);
+MODULE_PARM_DESC(queue_depth, "Default queue depth for new SCSI devices");
+
 /* Find start of Sense Information in FCP response unit*/
 char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
 {
@@ -24,6 +28,12 @@ char *zfcp_get_fcp_sns_info_ptr(struct fcp_rsp_iu *fcp_rsp_iu)
        return fcp_sns_info_ptr;
 }
 
+static int zfcp_scsi_change_queue_depth(struct scsi_device *sdev, int depth)
+{
+       scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), depth);
+       return sdev->queue_depth;
+}
+
 static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 {
        struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
@@ -34,7 +44,7 @@ static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
 static int zfcp_scsi_slave_configure(struct scsi_device *sdp)
 {
        if (sdp->tagged_supported)
-               scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, 32);
+               scsi_adjust_queue_depth(sdp, MSG_SIMPLE_TAG, default_depth);
        else
                scsi_adjust_queue_depth(sdp, 0, 1);
        return 0;
@@ -647,6 +657,7 @@ struct zfcp_data zfcp_data = {
                .name                    = "zfcp",
                .module                  = THIS_MODULE,
                .proc_name               = "zfcp",
+               .change_queue_depth      = zfcp_scsi_change_queue_depth,
                .slave_alloc             = zfcp_scsi_slave_alloc,
                .slave_configure         = zfcp_scsi_slave_configure,
                .slave_destroy           = zfcp_scsi_slave_destroy,
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index fb2740789b6879d48ce2501272e3ddc53337108c..6a19ed9a1194aff51553bed7ad9384f5925d6779 100644
@@ -191,20 +191,19 @@ config SCSI_ENCLOSURE
          it has an enclosure device.  Selecting this option will just allow
          certain enclosure conditions to be reported and is not required.
 
-comment "Some SCSI devices (e.g. CD jukebox) support multiple LUNs"
-       depends on SCSI
-
 config SCSI_MULTI_LUN
        bool "Probe all LUNs on each SCSI device"
        depends on SCSI
        help
-         If you have a SCSI device that supports more than one LUN (Logical
-         Unit Number), e.g. a CD jukebox, and only one LUN is detected, you
-         can say Y here to force the SCSI driver to probe for multiple LUNs.
-         A SCSI device with multiple LUNs acts logically like multiple SCSI
-         devices. The vast majority of SCSI devices have only one LUN, and
-         so most people can say N here. The max_luns boot/module parameter 
-         allows to override this setting.
+         Some devices support more than one LUN (Logical Unit Number) in
+         order to allow access to several media, e.g. CD jukebox, USB card
+         reader, mobile phone in mass storage mode. This option forces the
+         kernel to probe for all LUNs by default. This setting can be
+         overridden by the max_luns boot/module parameter. Note that this
+         option does not affect devices conforming to SCSI-3 or higher, as
+         they can explicitly report their number of LUNs. It is safe to say
+         Y here unless you have one of those rare devices which react in an
+         unexpected way when probed for multiple LUNs.
 
 config SCSI_CONSTANTS
        bool "Verbose SCSI error reporting (kernel size +=12K)"
@@ -355,6 +354,7 @@ config ISCSI_TCP
         http://open-iscsi.org
 
 source "drivers/scsi/cxgb3i/Kconfig"
+source "drivers/scsi/bnx2i/Kconfig"
 
 config SGIWD93_SCSI
        tristate "SGI WD93C93 SCSI Driver"
@@ -508,6 +508,7 @@ config SCSI_AIC7XXX_OLD
 
 source "drivers/scsi/aic7xxx/Kconfig.aic79xx"
 source "drivers/scsi/aic94xx/Kconfig"
+source "drivers/scsi/mvsas/Kconfig"
 
 config SCSI_DPT_I2O
        tristate "Adaptec I2O RAID support "
@@ -1050,16 +1051,6 @@ config SCSI_IZIP_SLOW_CTR
 
          Generally, saying N is fine.
 
-config SCSI_MVSAS
-       tristate "Marvell 88SE6440 SAS/SATA support"
-       depends on PCI && SCSI
-       select SCSI_SAS_LIBSAS
-       help
-         This driver supports Marvell SAS/SATA PCI devices.
-
-         To compiler this driver as a module, choose M here: the module
-         will be called mvsas.
-
 config SCSI_NCR53C406A
        tristate "NCR53c406a SCSI support"
        depends on ISA && SCSI
index a5049cfb40edb1c4e3c69aa9e4e95076590a0e57..25429ea63d0ad0708d236e1242819b6fc152e676 100644 (file)
@@ -126,9 +126,10 @@ obj-$(CONFIG_SCSI_IBMVSCSIS)       += ibmvscsi/
 obj-$(CONFIG_SCSI_IBMVFC)      += ibmvscsi/
 obj-$(CONFIG_SCSI_HPTIOP)      += hptiop.o
 obj-$(CONFIG_SCSI_STEX)                += stex.o
-obj-$(CONFIG_SCSI_MVSAS)       += mvsas.o
+obj-$(CONFIG_SCSI_MVSAS)       += mvsas/
 obj-$(CONFIG_PS3_ROM)          += ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI) += libiscsi.o libiscsi_tcp.o cxgb3i/
+obj-$(CONFIG_SCSI_BNX2_ISCSI)  += libiscsi.o bnx2i/
 
 obj-$(CONFIG_ARM)              += arm/
 
index c889d8458684b002d5e03bd9bebf3a4e639cf4c4..1cdf09a4779a560532e7dc2edafc4dc4f91e2170 100644 (file)
@@ -224,7 +224,7 @@ NCR_D700_probe_one(struct NCR_D700_private *p, int siop, int irq,
        return ret;
 }
 
-static int
+static irqreturn_t
 NCR_D700_intr(int irq, void *data)
 {
        struct NCR_D700_private *p = (struct NCR_D700_private *)data;
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_constants.h b/drivers/scsi/bnx2i/57xx_iscsi_constants.h
new file mode 100644 (file)
index 0000000..2fceb19
--- /dev/null
@@ -0,0 +1,155 @@
+/* 57xx_iscsi_constants.h: Broadcom NetXtreme II iSCSI HSI
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_CONSTANTS_H_
+#define __57XX_ISCSI_CONSTANTS_H_
+
+/*
+ * This file defines HSI constants for the iSCSI flows
+ */
+
+/* iSCSI request op codes */
+#define ISCSI_OPCODE_CLEANUP_REQUEST    (7)
+
+/* iSCSI response/messages op codes */
+#define ISCSI_OPCODE_CLEANUP_RESPONSE          (0x27)
+#define ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION    (0)
+
+/* iSCSI task types */
+#define ISCSI_TASK_TYPE_READ    (0)
+#define ISCSI_TASK_TYPE_WRITE   (1)
+#define ISCSI_TASK_TYPE_MPATH   (2)
+
+/* initial CQ sequence numbers */
+#define ISCSI_INITIAL_SN    (1)
+
+/* KWQ (kernel work queue) layer codes */
+#define ISCSI_KWQE_LAYER_CODE   (6)
+
+/* KWQ (kernel work queue) request op codes */
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN1 (0)
+#define ISCSI_KWQE_OPCODE_OFFLOAD_CONN2 (1)
+#define ISCSI_KWQE_OPCODE_UPDATE_CONN   (2)
+#define ISCSI_KWQE_OPCODE_DESTROY_CONN  (3)
+#define ISCSI_KWQE_OPCODE_INIT1         (4)
+#define ISCSI_KWQE_OPCODE_INIT2         (5)
+
+/* KCQ (kernel completion queue) response op codes */
+#define ISCSI_KCQE_OPCODE_OFFLOAD_CONN  (0x10)
+#define ISCSI_KCQE_OPCODE_UPDATE_CONN   (0x12)
+#define ISCSI_KCQE_OPCODE_DESTROY_CONN  (0x13)
+#define ISCSI_KCQE_OPCODE_INIT          (0x14)
+#define ISCSI_KCQE_OPCODE_FW_CLEAN_TASK        (0x15)
+#define ISCSI_KCQE_OPCODE_TCP_RESET     (0x16)
+#define ISCSI_KCQE_OPCODE_TCP_SYN       (0x17)
+#define ISCSI_KCQE_OPCODE_TCP_FIN       (0x18)
+#define ISCSI_KCQE_OPCODE_TCP_ERROR     (0x19)
+#define ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
+#define ISCSI_KCQE_OPCODE_ISCSI_ERROR   (0x21)
+
+/* KCQ (kernel completion queue) completion status */
+#define ISCSI_KCQE_COMPLETION_STATUS_SUCCESS                            (0x0)
+#define ISCSI_KCQE_COMPLETION_STATUS_INVALID_OPCODE                     (0x1)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE                  (0x2)
+#define ISCSI_KCQE_COMPLETION_STATUS_CTX_FREE_FAILURE                   (0x3)
+#define ISCSI_KCQE_COMPLETION_STATUS_NIC_ERROR                          (0x4)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR                        (0x5)
+#define ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR                       (0x6)
+
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_UNEXPECTED_OPCODE     (0xa)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE                (0xb)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN               (0xc)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT                   (0xd)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN                (0xe)
+
+/* Response */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN            (0xf)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T              (0x10)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_IS_ZERO  (0x2c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_TOO_BIG  (0x2d)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0                 (0x11)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1                 (0x12)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2                 (0x13)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3                 (0x14)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4                 (0x15)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5                 (0x16)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6                 (0x17)
+
+/* Data-In */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN        (0x18)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN       (0x19)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO            (0x1a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV          (0x1b)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN                (0x1c)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN      (0x1d)
+
+/* R2T */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF            (0x1f)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN                   (0x20)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN                 (0x21)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0 (0x22)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1 (0x23)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED       (0x24)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV           (0x25)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN         (0x26)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO (0x27)
+
+/* TMF */
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN        (0x28)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN         (0x29)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN         (0x2a)
+#define ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP   (0x2b)
+
+/* IP/TCP processing errors: */
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT               (0x40)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS                (0x41)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG               (0x42)
+#define ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_MAX_RTRANS                (0x43)
+
+/* iSCSI licensing errors */
+/* general iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED                (0x50)
+/* additional LOM specific iSCSI license not installed */
+#define ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED              (0x51)
+
+/* SQ/RQ/CQ DB structure sizes */
+#define ISCSI_SQ_DB_SIZE    (16)
+#define ISCSI_RQ_DB_SIZE    (16)
+#define ISCSI_CQ_DB_SIZE    (80)
+
+#define ISCSI_SQN_TO_NOTIFY_NOT_VALID                                   0xFFFF
+
+/* Page size codes (for flags field in connection offload request) */
+#define ISCSI_PAGE_SIZE_256     (0)
+#define ISCSI_PAGE_SIZE_512     (1)
+#define ISCSI_PAGE_SIZE_1K      (2)
+#define ISCSI_PAGE_SIZE_2K      (3)
+#define ISCSI_PAGE_SIZE_4K      (4)
+#define ISCSI_PAGE_SIZE_8K      (5)
+#define ISCSI_PAGE_SIZE_16K     (6)
+#define ISCSI_PAGE_SIZE_32K     (7)
+#define ISCSI_PAGE_SIZE_64K     (8)
+#define ISCSI_PAGE_SIZE_128K    (9)
+#define ISCSI_PAGE_SIZE_256K    (10)
+#define ISCSI_PAGE_SIZE_512K    (11)
+#define ISCSI_PAGE_SIZE_1M      (12)
+#define ISCSI_PAGE_SIZE_2M      (13)
+#define ISCSI_PAGE_SIZE_4M      (14)
+#define ISCSI_PAGE_SIZE_8M      (15)
+
+/* iSCSI PDU related defines */
+#define ISCSI_HEADER_SIZE   (48)
+#define ISCSI_DIGEST_SHIFT  (2)
+#define ISCSI_DIGEST_SIZE   (4)
+
+#define B577XX_ISCSI_CONNECTION_TYPE    3
+
+#endif /*__57XX_ISCSI_CONSTANTS_H_ */
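
Two conventions in this header are worth noting. Each bitfield mask comes paired with a _SHIFT, so fields are built as (value << FOO_SHIFT) & FOO, and the page-size codes are simply log2(size) - 8 (256 B is code 0, 4 KB is code 4, 8 MB is code 15). A hypothetical helper, not part of the patch, that derives the code for the connection-offload flags field:

	#include <linux/log2.h>

	/* Hypothetical helper: map a power-of-two page size in bytes to
	 * its ISCSI_PAGE_SIZE_* code, or -1 if it cannot be represented. */
	static int iscsi_page_size_code(unsigned long page_size)
	{
		int code;

		if (!is_power_of_2(page_size))
			return -1;
		code = ilog2(page_size) - 8;	/* 256 bytes == code 0 */
		if (code < 0 || code > 15)
			return -1;
		return code;
	}
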
diff --git a/drivers/scsi/bnx2i/57xx_iscsi_hsi.h b/drivers/scsi/bnx2i/57xx_iscsi_hsi.h
new file mode 100644 (file)
index 0000000..36af1af
--- /dev/null
@@ -0,0 +1,1509 @@
+/* 57xx_iscsi_hsi.h: Broadcom NetXtreme II iSCSI HSI.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+#ifndef __57XX_ISCSI_HSI_LINUX_LE__
+#define __57XX_ISCSI_HSI_LINUX_LE__
+
+/*
+ * iSCSI Async CQE
+ */
+struct bnx2i_async_msg {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved5;
+       u8 err_code;
+       u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved4;
+       u8 err_code;
+       u16 reserved5;
+#endif
+       u32 reserved6;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u8 async_event;
+       u8 async_vcode;
+       u16 param1;
+#elif defined(__LITTLE_ENDIAN)
+       u16 param1;
+       u8 async_vcode;
+       u8 async_event;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 param2;
+       u16 param3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 param3;
+       u16 param2;
+#endif
+       u32 reserved7[3];
+       u32 cq_req_sn;
+};
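
Every structure in this header is declared twice, with the members of each 32-bit word listed in opposite order under __BIG_ENDIAN and __LITTLE_ENDIAN. The point is that a field then occupies the same bit positions within its word regardless of host byte order, so the driver can access queue-entry fields directly without swapping. A hedged illustration of the invariant for the structure above (hypothetical check, not part of the patch; assumes linux/kernel.h for BUILD_BUG_ON):

	/* op_code always lands in the most significant byte of the first
	 * 32-bit word when that word is read as a host-endian u32. */
	static inline void bnx2i_async_msg_layout_check(void)
	{
	#if defined(__BIG_ENDIAN)
		BUILD_BUG_ON(offsetof(struct bnx2i_async_msg, op_code) != 0);
	#elif defined(__LITTLE_ENDIAN)
		BUILD_BUG_ON(offsetof(struct bnx2i_async_msg, op_code) != 3);
	#endif
	}
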
+
+
+/*
+ * iSCSI Buffer Descriptor (BD)
+ */
+struct iscsi_bd {
+       u32 buffer_addr_hi;
+       u32 buffer_addr_lo;
+#if defined(__BIG_ENDIAN)
+       u16 reserved0;
+       u16 buffer_length;
+#elif defined(__LITTLE_ENDIAN)
+       u16 buffer_length;
+       u16 reserved0;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+#elif defined(__LITTLE_ENDIAN)
+       u16 flags;
+#define ISCSI_BD_RESERVED1 (0x3F<<0)
+#define ISCSI_BD_RESERVED1_SHIFT 0
+#define ISCSI_BD_LAST_IN_BD_CHAIN (0x1<<6)
+#define ISCSI_BD_LAST_IN_BD_CHAIN_SHIFT 6
+#define ISCSI_BD_FIRST_IN_BD_CHAIN (0x1<<7)
+#define ISCSI_BD_FIRST_IN_BD_CHAIN_SHIFT 7
+#define ISCSI_BD_RESERVED2 (0xFF<<8)
+#define ISCSI_BD_RESERVED2_SHIFT 8
+       u16 reserved3;
+#endif
+};
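
A command's data buffer is described by a chain of these fixed-size BDs, with the first/last flags marking the chain boundaries. A minimal fill helper, assuming a hypothetical caller that walks a DMA-mapped scatterlist (and ignoring any byte-order conversion the real driver may apply):

	static void iscsi_bd_fill(struct iscsi_bd *bd, dma_addr_t addr,
				  u32 len, bool first, bool last)
	{
		bd->buffer_addr_hi = (u32) ((u64) addr >> 32);
		bd->buffer_addr_lo = (u32) addr;
		bd->buffer_length = (u16) len;	/* capped by MAX_BD_LENGTH */
		bd->flags = 0;
		if (first)
			bd->flags |= ISCSI_BD_FIRST_IN_BD_CHAIN;
		if (last)
			bd->flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
	}
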
+
+
+/*
+ * iSCSI Cleanup SQ WQE
+ */
+struct bnx2i_cleanup_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2[3];
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CLEANUP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_REQUEST_TYPE_SHIFT 14
+       u16 reserved3;
+#endif
+       u32 reserved4[10];
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved6;
+       u16 reserved5;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved5;
+       u8 reserved6;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Cleanup CQE
+ */
+struct bnx2i_cleanup_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 status;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 status;
+       u8 op_code;
+#endif
+       u32 reserved1[3];
+       u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5[7];
+#if defined(__BIG_ENDIAN)
+       u16 reserved6;
+       u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CLEANUP_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CLEANUP_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CLEANUP_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CLEANUP_RESPONSE_TYPE_SHIFT 14
+       u16 reserved6;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * SCSI read/write SQ WQE
+ */
+struct bnx2i_cmd_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_CMD_REQUEST_TASK_ATTR (0x7<<0)
+#define ISCSI_CMD_REQUEST_TASK_ATTR_SHIFT 0
+#define ISCSI_CMD_REQUEST_RESERVED1 (0x3<<3)
+#define ISCSI_CMD_REQUEST_RESERVED1_SHIFT 3
+#define ISCSI_CMD_REQUEST_WRITE (0x1<<5)
+#define ISCSI_CMD_REQUEST_WRITE_SHIFT 5
+#define ISCSI_CMD_REQUEST_READ (0x1<<6)
+#define ISCSI_CMD_REQUEST_READ_SHIFT 6
+#define ISCSI_CMD_REQUEST_FINAL (0x1<<7)
+#define ISCSI_CMD_REQUEST_FINAL_SHIFT 7
+       u8 op_code;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 ud_buffer_offset;
+       u16 sd_buffer_offset;
+#elif defined(__LITTLE_ENDIAN)
+       u16 sd_buffer_offset;
+       u16 ud_buffer_offset;
+#endif
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CMD_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_REQUEST_INDEX_SHIFT 0
+#define ISCSI_CMD_REQUEST_TYPE (0x3<<14)
+#define ISCSI_CMD_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+       u32 total_data_transfer_length;
+       u32 cmd_sn;
+       u32 reserved3;
+       u32 cdb[4];
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 sd_start_bd_index;
+       u8 ud_start_bd_index;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 ud_start_bd_index;
+       u8 sd_start_bd_index;
+       u8 cq_index;
+#endif
+};
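
Note that the itt field here is not a raw iSCSI initiator task tag: the low 14 bits carry a task-table index and the top two bits a task type (the ISCSI_TASK_TYPE_* codes from 57xx_iscsi_constants.h). A hypothetical pack/unpack pair following the header's mask/shift convention:

	static u16 bnx2i_itt_pack(u16 index, u8 task_type)
	{
		return (index & ISCSI_CMD_REQUEST_INDEX) |
		       ((task_type << ISCSI_CMD_REQUEST_TYPE_SHIFT) &
			ISCSI_CMD_REQUEST_TYPE);
	}

	static u16 bnx2i_itt_index(u16 itt)
	{
		return itt & ISCSI_CMD_REQUEST_INDEX;
	}
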
+
+
+/*
+ * task statistics for write response
+ */
+struct bnx2i_write_resp_task_stat {
+       u32 num_data_ins;
+};
+
+/*
+ * task statistics for read response
+ */
+struct bnx2i_read_resp_task_stat {
+#if defined(__BIG_ENDIAN)
+       u16 num_data_outs;
+       u16 num_r2ts;
+#elif defined(__LITTLE_ENDIAN)
+       u16 num_r2ts;
+       u16 num_data_outs;
+#endif
+};
+
+/*
+ * task statistics for iSCSI cmd response
+ */
+union bnx2i_cmd_resp_task_stat {
+       struct bnx2i_write_resp_task_stat write_stat;
+       struct bnx2i_read_resp_task_stat read_stat;
+};
+
+/*
+ * SCSI Command CQE
+ */
+struct bnx2i_cmd_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+       u8 response;
+       u8 status;
+#elif defined(__LITTLE_ENDIAN)
+       u8 status;
+       u8 response;
+       u8 response_flags;
+#define ISCSI_CMD_RESPONSE_RESERVED0 (0x1<<0)
+#define ISCSI_CMD_RESPONSE_RESERVED0_SHIFT 0
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW (0x1<<1)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_UNDERFLOW_SHIFT 1
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW (0x1<<2)
+#define ISCSI_CMD_RESPONSE_RESIDUAL_OVERFLOW_SHIFT 2
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW (0x1<<3)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_UNDERFLOW_SHIFT 3
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW (0x1<<4)
+#define ISCSI_CMD_RESPONSE_BR_RESIDUAL_OVERFLOW_SHIFT 4
+#define ISCSI_CMD_RESPONSE_RESERVED1 (0x7<<5)
+#define ISCSI_CMD_RESPONSE_RESERVED1_SHIFT 5
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved2;
+       u32 residual_count;
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5[5];
+       union bnx2i_cmd_resp_task_stat task_stat;
+       u32 reserved6;
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_CMD_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_CMD_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_CMD_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_CMD_RESPONSE_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+
+
+/*
+ * firmware middle-path request SQ WQE
+ */
+struct bnx2i_fw_mp_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+       u16 hdr_opaque1;
+#elif defined(__LITTLE_ENDIAN)
+       u16 hdr_opaque1;
+       u8 op_attr;
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 hdr_opaque2[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved0;
+       u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_FW_MP_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_FW_MP_REQUEST_INDEX_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_TYPE (0x3<<14)
+#define ISCSI_FW_MP_REQUEST_TYPE_SHIFT 14
+       u16 reserved0;
+#endif
+       u32 hdr_opaque3[4];
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_FW_MP_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_FW_MP_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 reserved3;
+       u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+       u8 flags;
+#define ISCSI_FW_MP_REQUEST_RESERVED1 (0x1<<0)
+#define ISCSI_FW_MP_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_FW_MP_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_FW_MP_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_FW_MP_REQUEST_RESERVED2 (0x1F<<3)
+#define ISCSI_FW_MP_REQUEST_RESERVED2_SHIFT 3
+       u8 reserved3;
+       u16 reserved4;
+#endif
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved6;
+       u8 reserved5;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved5;
+       u8 reserved6;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * firmware response - CQE: used only by firmware
+ */
+struct bnx2i_fw_response {
+       u32 hdr_dword1[2];
+       u32 hdr_exp_cmd_sn;
+       u32 hdr_max_cmd_sn;
+       u32 hdr_ttt;
+       u32 hdr_res_cnt;
+       u32 cqe_flags;
+#define ISCSI_FW_RESPONSE_RESERVED2 (0xFF<<0)
+#define ISCSI_FW_RESPONSE_RESERVED2_SHIFT 0
+#define ISCSI_FW_RESPONSE_ERR_CODE (0xFF<<8)
+#define ISCSI_FW_RESPONSE_ERR_CODE_SHIFT 8
+#define ISCSI_FW_RESPONSE_RESERVED3 (0xFFFF<<16)
+#define ISCSI_FW_RESPONSE_RESERVED3_SHIFT 16
+       u32 stat_sn;
+       u32 hdr_dword2[2];
+       u32 hdr_dword3[2];
+       u32 task_stat;
+       u32 reserved0;
+       u32 hdr_itt;
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI KCQ CQE parameters
+ */
+union iscsi_kcqe_params {
+       u32 reserved0[4];
+};
+
+/*
+ * iSCSI KCQ CQE
+ */
+struct iscsi_kcqe {
+       u32 iscsi_conn_id;
+       u32 completion_status;
+       u32 iscsi_conn_context_id;
+       union iscsi_kcqe_params params;
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+       u8 op_code;
+       u16 qe_self_seq;
+#elif defined(__LITTLE_ENDIAN)
+       u16 qe_self_seq;
+       u8 op_code;
+       u8 flags;
+#define ISCSI_KCQE_RESERVED0 (0xF<<0)
+#define ISCSI_KCQE_RESERVED0_SHIFT 0
+#define ISCSI_KCQE_LAYER_CODE (0x7<<4)
+#define ISCSI_KCQE_LAYER_CODE_SHIFT 4
+#define ISCSI_KCQE_RESERVED1 (0x1<<7)
+#define ISCSI_KCQE_RESERVED1_SHIFT 7
+#endif
+};
+
+
+
+/*
+ * iSCSI KWQE header
+ */
+struct iscsi_kwqe_header {
+#if defined(__BIG_ENDIAN)
+       u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+       u8 op_code;
+#elif defined(__LITTLE_ENDIAN)
+       u8 op_code;
+       u8 flags;
+#define ISCSI_KWQE_HEADER_RESERVED0 (0xF<<0)
+#define ISCSI_KWQE_HEADER_RESERVED0_SHIFT 0
+#define ISCSI_KWQE_HEADER_LAYER_CODE (0x7<<4)
+#define ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT 4
+#define ISCSI_KWQE_HEADER_RESERVED1 (0x1<<7)
+#define ISCSI_KWQE_HEADER_RESERVED1_SHIFT 7
+#endif
+};
+
+/*
+ * iSCSI firmware init request 1
+ */
+struct iscsi_kwqe_init1 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u8 reserved0;
+       u8 num_cqs;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_cqs;
+       u8 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 dummy_buffer_addr_lo;
+       u32 dummy_buffer_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u16 num_ccells_per_conn;
+       u16 num_tasks_per_conn;
+#elif defined(__LITTLE_ENDIAN)
+       u16 num_tasks_per_conn;
+       u16 num_ccells_per_conn;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 sq_wqes_per_page;
+       u16 sq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+       u16 sq_num_wqes;
+       u16 sq_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 cq_log_wqes_per_page;
+       u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+       u16 cq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+       u16 cq_num_wqes;
+       u8 flags;
+#define ISCSI_KWQE_INIT1_PAGE_SIZE (0xF<<0)
+#define ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT 0
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE (0x1<<4)
+#define ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE_SHIFT 4
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE (0x1<<5)
+#define ISCSI_KWQE_INIT1_KEEP_ALIVE_ENABLE_SHIFT 5
+#define ISCSI_KWQE_INIT1_RESERVED1 (0x3<<6)
+#define ISCSI_KWQE_INIT1_RESERVED1_SHIFT 6
+       u8 cq_log_wqes_per_page;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 cq_num_pages;
+       u16 sq_num_pages;
+#elif defined(__LITTLE_ENDIAN)
+       u16 sq_num_pages;
+       u16 cq_num_pages;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 rq_buffer_size;
+       u16 rq_num_wqes;
+#elif defined(__LITTLE_ENDIAN)
+       u16 rq_num_wqes;
+       u16 rq_buffer_size;
+#endif
+};
+
+/*
+ * iSCSI firmware init request 2
+ */
+struct iscsi_kwqe_init2 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 max_cq_sqn;
+#elif defined(__LITTLE_ENDIAN)
+       u16 max_cq_sqn;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 error_bit_map[2];
+       u32 reserved1[5];
+};
+
+/*
+ * Initial iSCSI connection offload request 1
+ */
+struct iscsi_kwqe_conn_offload1 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 iscsi_conn_id;
+#elif defined(__LITTLE_ENDIAN)
+       u16 iscsi_conn_id;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 sq_page_table_addr_lo;
+       u32 sq_page_table_addr_hi;
+       u32 cq_page_table_addr_lo;
+       u32 cq_page_table_addr_hi;
+       u32 reserved0[3];
+};
+
+/*
+ * iSCSI Page Table Entry (PTE)
+ */
+struct iscsi_pte {
+       u32 hi;
+       u32 lo;
+};
+
+/*
+ * Initial iSCSI connection offload request 2
+ */
+struct iscsi_kwqe_conn_offload2 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 rq_page_table_addr_lo;
+       u32 rq_page_table_addr_hi;
+       struct iscsi_pte sq_first_pte;
+       struct iscsi_pte cq_first_pte;
+       u32 num_additional_wqes;
+};
+
+
+/*
+ * Initial iSCSI connection offload request 3
+ */
+struct iscsi_kwqe_conn_offload3 {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 reserved1;
+       struct iscsi_pte qp_first_pte[3];
+};
+
+
+/*
+ * iSCSI connection update request
+ */
+struct iscsi_kwqe_conn_update {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 session_error_recovery_level;
+       u8 max_outstanding_r2ts;
+       u8 reserved2;
+       u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+#elif defined(__LITTLE_ENDIAN)
+       u8 conn_flags;
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST (0x1<<0)
+#define ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST_SHIFT 0
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST (0x1<<1)
+#define ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST_SHIFT 1
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T (0x1<<2)
+#define ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T_SHIFT 2
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA (0x1<<3)
+#define ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA_SHIFT 3
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1 (0xF<<4)
+#define ISCSI_KWQE_CONN_UPDATE_RESERVED1_SHIFT 4
+       u8 reserved2;
+       u8 max_outstanding_r2ts;
+       u8 session_error_recovery_level;
+#endif
+       u32 context_id;
+       u32 max_send_pdu_length;
+       u32 max_recv_pdu_length;
+       u32 first_burst_length;
+       u32 max_burst_length;
+       u32 exp_stat_sn;
+};
+
+/*
+ * iSCSI destroy connection request
+ */
+struct iscsi_kwqe_conn_destroy {
+#if defined(__BIG_ENDIAN)
+       struct iscsi_kwqe_header hdr;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       struct iscsi_kwqe_header hdr;
+#endif
+       u32 context_id;
+       u32 reserved1[6];
+};
+
+/*
+ * iSCSI KWQ WQE
+ */
+union iscsi_kwqe {
+       struct iscsi_kwqe_init1 init1;
+       struct iscsi_kwqe_init2 init2;
+       struct iscsi_kwqe_conn_offload1 conn_offload1;
+       struct iscsi_kwqe_conn_offload2 conn_offload2;
+       struct iscsi_kwqe_conn_update conn_update;
+       struct iscsi_kwqe_conn_destroy conn_destroy;
+};
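
All of these control-path requests travel through cnic's kernel work queue, and each begins with the two-byte iscsi_kwqe_header whose flags field carries the layer code (ISCSI_KWQE_LAYER_CODE, 6, from the constants file) that routes the WQE to the iSCSI engine. A hedged sketch of stamping the header (hypothetical helper, not from the patch):

	static void iscsi_kwqe_hdr_init(struct iscsi_kwqe_header *hdr, u8 op_code)
	{
		hdr->op_code = op_code;		/* e.g. ISCSI_KWQE_OPCODE_INIT1 */
		hdr->flags = (ISCSI_KWQE_LAYER_CODE <<
			      ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT) &
			     ISCSI_KWQE_HEADER_LAYER_CODE;
	}
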
+
+/*
+ * iSCSI Login SQ WQE
+ */
+struct bnx2i_login_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+       u8 version_max;
+       u8 version_min;
+#elif defined(__LITTLE_ENDIAN)
+       u8 version_min;
+       u8 version_max;
+       u8 op_attr;
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_REQUEST_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_REQUEST_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_REQUEST_CONT (0x1<<6)
+#define ISCSI_LOGIN_REQUEST_CONT_SHIFT 6
+#define ISCSI_LOGIN_REQUEST_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_REQUEST_TRANSIT_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+       u16 isid_hi;
+       u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+       u16 tsih;
+       u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGIN_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGIN_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 cid;
+       u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved3;
+       u16 cid;
+#endif
+       u32 cmd_sn;
+       u32 exp_stat_sn;
+       u32 reserved4;
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+       u16 reserved8;
+       u8 reserved7;
+       u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+#elif defined(__LITTLE_ENDIAN)
+       u8 flags;
+#define ISCSI_LOGIN_REQUEST_RESERVED5 (0x3<<0)
+#define ISCSI_LOGIN_REQUEST_RESERVED5_SHIFT 0
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN (0x1<<2)
+#define ISCSI_LOGIN_REQUEST_UPDATE_EXP_STAT_SN_SHIFT 2
+#define ISCSI_LOGIN_REQUEST_RESERVED6 (0x1F<<3)
+#define ISCSI_LOGIN_REQUEST_RESERVED6_SHIFT 3
+       u8 reserved7;
+       u16 reserved8;
+#endif
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved10;
+       u8 reserved9;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved9;
+       u8 reserved10;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Login CQE
+ */
+struct bnx2i_login_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+       u8 version_max;
+       u8 version_active;
+#elif defined(__LITTLE_ENDIAN)
+       u8 version_active;
+       u8 version_max;
+       u8 response_flags;
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE (0x3<<0)
+#define ISCSI_LOGIN_RESPONSE_NEXT_STAGE_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE (0x3<<2)
+#define ISCSI_LOGIN_RESPONSE_CURRENT_STAGE_SHIFT 2
+#define ISCSI_LOGIN_RESPONSE_RESERVED0 (0x3<<4)
+#define ISCSI_LOGIN_RESPONSE_RESERVED0_SHIFT 4
+#define ISCSI_LOGIN_RESPONSE_CONT (0x1<<6)
+#define ISCSI_LOGIN_RESPONSE_CONT_SHIFT 6
+#define ISCSI_LOGIN_RESPONSE_TRANSIT (0x1<<7)
+#define ISCSI_LOGIN_RESPONSE_TRANSIT_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u8 err_code;
+       u8 reserved2;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved2;
+       u8 err_code;
+       u16 reserved3;
+#endif
+       u32 stat_sn;
+       u32 isid_lo;
+#if defined(__BIG_ENDIAN)
+       u16 isid_hi;
+       u16 tsih;
+#elif defined(__LITTLE_ENDIAN)
+       u16 tsih;
+       u16 isid_hi;
+#endif
+#if defined(__BIG_ENDIAN)
+       u8 status_class;
+       u8 status_detail;
+       u16 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved4;
+       u8 status_detail;
+       u8 status_class;
+#endif
+       u32 reserved5[3];
+#if defined(__BIG_ENDIAN)
+       u16 reserved6;
+       u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGIN_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGIN_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGIN_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGIN_RESPONSE_TYPE_SHIFT 14
+       u16 reserved6;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Logout SQ WQE
+ */
+struct bnx2i_logout_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_LOGOUT_REQUEST_REASON (0x7F<<0)
+#define ISCSI_LOGOUT_REQUEST_REASON_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_LOGOUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 reserved1[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGOUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+#if defined(__BIG_ENDIAN)
+       u16 cid;
+       u16 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved3;
+       u16 cid;
+#endif
+       u32 cmd_sn;
+       u32 reserved4[5];
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved6;
+       u8 reserved5;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved5;
+       u8 reserved6;
+       u8 cq_index;
+#endif
+};
+
+
+/*
+ * iSCSI Logout CQE
+ */
+struct bnx2i_logout_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u8 response;
+       u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved0;
+       u8 response;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved5;
+       u8 err_code;
+       u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved4;
+       u8 err_code;
+       u16 reserved5;
+#endif
+       u32 reserved6[3];
+#if defined(__BIG_ENDIAN)
+       u16 time_to_wait;
+       u16 time_to_retain;
+#elif defined(__LITTLE_ENDIAN)
+       u16 time_to_retain;
+       u16 time_to_wait;
+#endif
+       u32 reserved7[3];
+#if defined(__BIG_ENDIAN)
+       u16 reserved8;
+       u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_LOGOUT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_LOGOUT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_LOGOUT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_LOGOUT_RESPONSE_TYPE_SHIFT 14
+       u16 reserved8;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI Nop-In CQE
+ */
+struct bnx2i_nop_in_msg {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 ttt;
+       u32 reserved2;
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5;
+       u32 lun[2];
+       u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_NOP_IN_MSG_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_IN_MSG_INDEX_SHIFT 0
+#define ISCSI_NOP_IN_MSG_TYPE (0x3<<14)
+#define ISCSI_NOP_IN_MSG_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+
+/*
+ * iSCSI NOP-OUT SQ WQE
+ */
+struct bnx2i_nop_out_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1 (0x7F<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_NOP_OUT_REQUEST_ALWAYS_ONE_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved2;
+       u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_NOP_OUT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_NOP_OUT_REQUEST_TYPE_SHIFT 14
+       u16 reserved2;
+#endif
+       u32 ttt;
+       u32 cmd_sn;
+       u32 reserved3[2];
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_NOP_OUT_REQUEST_NUM_RESP_BDS_SHIFT 24
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u8 reserved6;
+       u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+#elif defined(__LITTLE_ENDIAN)
+       u8 flags;
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4 (0x1<<0)
+#define ISCSI_NOP_OUT_REQUEST_RESERVED4_SHIFT 0
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION (0x1<<1)
+#define ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION_SHIFT 1
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL (0x3F<<2)
+#define ISCSI_NOP_OUT_REQUEST_ZERO_FILL_SHIFT 2
+       u8 reserved6;
+       u16 reserved7;
+#endif
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved9;
+       u8 reserved8;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved8;
+       u8 reserved9;
+       u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Reject CQE
+ */
+struct bnx2i_reject_msg {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u8 reason;
+       u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved0;
+       u8 reason;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved2[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5[8];
+       u32 cq_req_sn;
+};
+
+/*
+ * bnx2i iSCSI TMF SQ WQE
+ */
+struct bnx2i_tmf_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_TMF_REQUEST_FUNCTION (0x7F<<0)
+#define ISCSI_TMF_REQUEST_FUNCTION_SHIFT 0
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE (0x1<<7)
+#define ISCSI_TMF_REQUEST_ALWAYS_ONE_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved1;
+       u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TMF_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TMF_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TMF_REQUEST_TYPE_SHIFT 14
+       u16 reserved1;
+#endif
+       u32 ref_itt;
+       u32 cmd_sn;
+       u32 reserved2;
+       u32 ref_cmd_sn;
+       u32 reserved3[3];
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved5;
+       u8 reserved4;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved4;
+       u8 reserved5;
+       u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI Text SQ WQE
+ */
+struct bnx2i_text_request {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 op_attr;
+#define ISCSI_TEXT_REQUEST_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_REQUEST_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_REQUEST_CONT (0x1<<6)
+#define ISCSI_TEXT_REQUEST_CONT_SHIFT 6
+#define ISCSI_TEXT_REQUEST_FINAL (0x1<<7)
+#define ISCSI_TEXT_REQUEST_FINAL_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 lun[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved3;
+       u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TEXT_REQUEST_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_REQUEST_INDEX_SHIFT 0
+#define ISCSI_TEXT_REQUEST_TYPE (0x3<<14)
+#define ISCSI_TEXT_REQUEST_TYPE_SHIFT 14
+       u16 reserved3;
+#endif
+       u32 ttt;
+       u32 cmd_sn;
+       u32 reserved4[2];
+       u32 resp_bd_list_addr_lo;
+       u32 resp_bd_list_addr_hi;
+       u32 resp_buffer;
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH (0xFFFFFF<<0)
+#define ISCSI_TEXT_REQUEST_RESP_BUFFER_LENGTH_SHIFT 0
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS (0xFF<<24)
+#define ISCSI_TEXT_REQUEST_NUM_RESP_BDS_SHIFT 24
+       u32 zero_fill;
+       u32 bd_list_addr_lo;
+       u32 bd_list_addr_hi;
+#if defined(__BIG_ENDIAN)
+       u8 cq_index;
+       u8 reserved7;
+       u8 reserved6;
+       u8 num_bds;
+#elif defined(__LITTLE_ENDIAN)
+       u8 num_bds;
+       u8 reserved6;
+       u8 reserved7;
+       u8 cq_index;
+#endif
+};
+
+/*
+ * iSCSI SQ WQE
+ */
+union iscsi_request {
+       struct bnx2i_cmd_request cmd;
+       struct bnx2i_tmf_request tmf;
+       struct bnx2i_nop_out_request nop_out;
+       struct bnx2i_login_request login_req;
+       struct bnx2i_text_request text;
+       struct bnx2i_logout_request logout_req;
+       struct bnx2i_cleanup_request cleanup;
+};
+
+
+/*
+ * iSCSI TMF CQE
+ */
+struct bnx2i_tmf_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 reserved1;
+       u8 response;
+       u8 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved0;
+       u8 response;
+       u8 reserved1;
+       u8 op_code;
+#endif
+       u32 reserved2;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 reserved3[2];
+#if defined(__BIG_ENDIAN)
+       u16 reserved5;
+       u8 err_code;
+       u8 reserved4;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved4;
+       u8 err_code;
+       u16 reserved5;
+#endif
+       u32 reserved6[7];
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TMF_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TMF_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TMF_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TMF_RESPONSE_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+/*
+ * iSCSI Text CQE
+ */
+struct bnx2i_text_response {
+#if defined(__BIG_ENDIAN)
+       u8 op_code;
+       u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+       u16 reserved0;
+#elif defined(__LITTLE_ENDIAN)
+       u16 reserved0;
+       u8 response_flags;
+#define ISCSI_TEXT_RESPONSE_RESERVED1 (0x3F<<0)
+#define ISCSI_TEXT_RESPONSE_RESERVED1_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_CONT (0x1<<6)
+#define ISCSI_TEXT_RESPONSE_CONT_SHIFT 6
+#define ISCSI_TEXT_RESPONSE_FINAL (0x1<<7)
+#define ISCSI_TEXT_RESPONSE_FINAL_SHIFT 7
+       u8 op_code;
+#endif
+       u32 data_length;
+       u32 exp_cmd_sn;
+       u32 max_cmd_sn;
+       u32 ttt;
+       u32 reserved2;
+#if defined(__BIG_ENDIAN)
+       u16 reserved4;
+       u8 err_code;
+       u8 reserved3;
+#elif defined(__LITTLE_ENDIAN)
+       u8 reserved3;
+       u8 err_code;
+       u16 reserved4;
+#endif
+       u32 reserved5;
+       u32 lun[2];
+       u32 reserved6[4];
+#if defined(__BIG_ENDIAN)
+       u16 reserved7;
+       u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+#elif defined(__LITTLE_ENDIAN)
+       u16 itt;
+#define ISCSI_TEXT_RESPONSE_INDEX (0x3FFF<<0)
+#define ISCSI_TEXT_RESPONSE_INDEX_SHIFT 0
+#define ISCSI_TEXT_RESPONSE_TYPE (0x3<<14)
+#define ISCSI_TEXT_RESPONSE_TYPE_SHIFT 14
+       u16 reserved7;
+#endif
+       u32 cq_req_sn;
+};
+
+/*
+ * iSCSI CQE
+ */
+union iscsi_response {
+       struct bnx2i_cmd_response cmd;
+       struct bnx2i_tmf_response tmf;
+       struct bnx2i_login_response login_resp;
+       struct bnx2i_text_response text;
+       struct bnx2i_logout_response logout_resp;
+       struct bnx2i_cleanup_response cleanup;
+       struct bnx2i_reject_msg reject;
+       struct bnx2i_async_msg async;
+       struct bnx2i_nop_in_msg nop_in;
+};
+
+#endif /* __57XX_ISCSI_HSI_LINUX_LE__ */
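
The two unions at the end are the working currency of the driver: fixed-size SQ WQEs going down and fixed-size CQEs coming back, each identified by the iSCSI op code in its first byte. A hypothetical completion demultiplexer (sketch only; the op-code values come from scsi/iscsi_proto.h):

	static void example_process_cqe(union iscsi_response *cqe)
	{
		switch (cqe->cmd.op_code & ISCSI_OPCODE_MASK) {
		case ISCSI_OP_SCSI_CMD_RSP:
			/* cqe->cmd carries status, residuals, task stats */
			break;
		case ISCSI_OP_LOGIN_RSP:
			/* cqe->login_resp carries stage flags and TSIH */
			break;
		case ISCSI_OP_NOOP_IN:
			/* cqe->nop_in: target ping or reply to our NOP-Out */
			break;
		default:
			break;
		}
	}
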
diff --git a/drivers/scsi/bnx2i/Kconfig b/drivers/scsi/bnx2i/Kconfig
new file mode 100644 (file)
index 0000000..820d428
--- /dev/null
@@ -0,0 +1,7 @@
+config SCSI_BNX2_ISCSI
+       tristate "Broadcom NetXtreme II iSCSI support"
+       select SCSI_ISCSI_ATTRS
+       select CNIC
+       ---help---
+         This driver supports iSCSI offload for the Broadcom NetXtreme II
+         devices.
diff --git a/drivers/scsi/bnx2i/Makefile b/drivers/scsi/bnx2i/Makefile
new file mode 100644 (file)
index 0000000..b5802bd
--- /dev/null
@@ -0,0 +1,3 @@
+bnx2i-y := bnx2i_init.o bnx2i_hwi.o bnx2i_iscsi.o bnx2i_sysfs.o
+
+obj-$(CONFIG_SCSI_BNX2_ISCSI) += bnx2i.o
diff --git a/drivers/scsi/bnx2i/bnx2i.h b/drivers/scsi/bnx2i/bnx2i.h
new file mode 100644 (file)
index 0000000..d7576f2
--- /dev/null
@@ -0,0 +1,771 @@
+/* bnx2i.h: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#ifndef _BNX2I_H_
+#define _BNX2I_H_
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/kfifo.h>
+#include <linux/netdevice.h>
+#include <linux/completion.h>
+
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi.h>
+#include <scsi/iscsi_proto.h>
+#include <scsi/libiscsi.h>
+#include <scsi/scsi_transport_iscsi.h>
+
+#include "../../net/cnic_if.h"
+#include "57xx_iscsi_hsi.h"
+#include "57xx_iscsi_constants.h"
+
+#define BNX2_ISCSI_DRIVER_NAME         "bnx2i"
+
+#define BNX2I_MAX_ADAPTERS             8
+
+#define ISCSI_MAX_CONNS_PER_HBA                128
+#define ISCSI_MAX_SESS_PER_HBA         ISCSI_MAX_CONNS_PER_HBA
+#define ISCSI_MAX_CMDS_PER_SESS                128
+
+/* Total active commands across all connections supported by devices */
+#define ISCSI_MAX_CMDS_PER_HBA_5708    (28 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_5709    (128 * (ISCSI_MAX_CMDS_PER_SESS - 1))
+#define ISCSI_MAX_CMDS_PER_HBA_57710   (256 * (ISCSI_MAX_CMDS_PER_SESS - 1))
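
For scale, with ISCSI_MAX_CMDS_PER_SESS = 128 these limits work out to 28 * 127 = 3556 active tasks on a 5708, 128 * 127 = 16256 on a 5709, and 256 * 127 = 32512 on a 57710.
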
+
+#define ISCSI_MAX_BDS_PER_CMD          32
+
+#define MAX_PAGES_PER_CTRL_STRUCT_POOL 8
+#define BNX2I_RESERVED_SLOW_PATH_CMD_SLOTS     4
+
+/* 5706/08 hardware has a limit on the maximum buffer size per BD it can handle */
+#define MAX_BD_LENGTH                  65535
+#define BD_SPLIT_SIZE                  32768
+
+/* min, max & default values for SQ/RQ/CQ size, configurable via modparam */
+#define BNX2I_SQ_WQES_MIN              16
+#define BNX2I_570X_SQ_WQES_MAX                 128
+#define BNX2I_5770X_SQ_WQES_MAX        512
+#define BNX2I_570X_SQ_WQES_DEFAULT     128
+#define BNX2I_5770X_SQ_WQES_DEFAULT    256
+
+#define BNX2I_570X_CQ_WQES_MAX                 128
+#define BNX2I_5770X_CQ_WQES_MAX        512
+
+#define BNX2I_RQ_WQES_MIN              16
+#define BNX2I_RQ_WQES_MAX              32
+#define BNX2I_RQ_WQES_DEFAULT          16
+
+/* CCELLs per conn */
+#define BNX2I_CCELLS_MIN               16
+#define BNX2I_CCELLS_MAX               96
+#define BNX2I_CCELLS_DEFAULT           64
+
+#define ITT_INVALID_SIGNATURE          0xFFFF
+
+#define ISCSI_CMD_CLEANUP_TIMEOUT      100
+
+#define BNX2I_CONN_CTX_BUF_SIZE                16384
+
+#define BNX2I_SQ_WQE_SIZE              64
+#define BNX2I_RQ_WQE_SIZE              256
+#define BNX2I_CQE_SIZE                 64
+
+#define MB_KERNEL_CTX_SHIFT            8
+#define MB_KERNEL_CTX_SIZE             (1 << MB_KERNEL_CTX_SHIFT)
+
+#define CTX_SHIFT                      7
+#define GET_CID_NUM(cid_addr)          ((cid_addr) >> CTX_SHIFT)
+
+#define CTX_OFFSET                     0x10000
+#define MAX_CID_CNT                    0x4000
+
+/* 5709 context registers */
+#define BNX2_MQ_CONFIG2                        0x00003d00
+#define BNX2_MQ_CONFIG2_CONT_SZ                (0x7L<<4)
+#define BNX2_MQ_CONFIG2_FIRST_L4L5     (0x1fL<<8)
+
+/* 57710's BAR2 is mapped to doorbell registers */
+#define BNX2X_DOORBELL_PCI_BAR         2
+#define BNX2X_MAX_CQS                  8
+
+#define CNIC_ARM_CQE                   1
+#define CNIC_DISARM_CQE                        0
+
+#define REG_RD(__hba, offset)                          \
+               readl(__hba->regview + offset)
+#define REG_WR(__hba, offset, val)                     \
+               writel(val, __hba->regview + offset)
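
These wrappers treat hba->regview (the ioremap'ed register BAR) as a base for plain 32-bit register accesses; usage then reads, e.g. (DOORBELL_OFF and prod_idx are hypothetical names, not defined in this header):

	val = REG_RD(hba, DOORBELL_OFF);	/* read a 32-bit register */
	REG_WR(hba, DOORBELL_OFF, prod_idx);	/* ring a doorbell cell */
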
+
+
+/**
+ * struct generic_pdu_resc - login pdu resource structure
+ *
+ * @req_buf:            driver buffer used to stage payload associated with
+ *                      the login request
+ * @req_dma_addr:       dma address for iscsi login request payload buffer
+ * @req_buf_size:       actual login request payload length
+ * @req_wr_ptr:         pointer into the login request buffer where the next
+ *                      data is to be written
+ * @resp_hdr:           iscsi header where iscsi login response header is to
+ *                      be recreated
+ * @resp_buf:           buffer to stage login response payload
+ * @resp_dma_addr:      login response payload buffer dma address
+ * @resp_buf_size:      login response payload length
+ * @resp_wr_ptr:        pointer into the login response buffer where the next
+ *                      data is to be written
+ * @req_bd_tbl:         iscsi login request payload BD table
+ * @req_bd_dma:         login request BD table dma address
+ * @resp_bd_tbl:        iscsi login response payload BD table
+ * @resp_bd_dma:        login response BD table dma address
+ *
+ * Following structure defines buffer info for generic pdus such as iSCSI
+ *     Login, Logout and NOP
+ */
+struct generic_pdu_resc {
+       char *req_buf;
+       dma_addr_t req_dma_addr;
+       u32 req_buf_size;
+       char *req_wr_ptr;
+       struct iscsi_hdr resp_hdr;
+       char *resp_buf;
+       dma_addr_t resp_dma_addr;
+       u32 resp_buf_size;
+       char *resp_wr_ptr;
+       char *req_bd_tbl;
+       dma_addr_t req_bd_dma;
+       char *resp_bd_tbl;
+       dma_addr_t resp_bd_dma;
+};
+
+
+/**
+ * struct bd_resc_page - tracks DMA'able memory allocated for BD tables
+ *
+ * @link:               list head to link elements
+ * @max_ptrs:           maximum pointers that can be stored in this page
+ * @num_valid:          number of valid pointers in this page
+ * @page:               base address of the page pointer array
+ *
+ * structure to track DMA'able memory allocated for command BD tables
+ */
+struct bd_resc_page {
+       struct list_head link;
+       u32 max_ptrs;
+       u32 num_valid;
+       void *page[1];
+};
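+
+/*
+ * Note: page[1] is the pre-C99 flexible-array idiom; the usable capacity
+ * of the pointer array is given by max_ptrs, not 1.
+ */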
+
+
+/**
+ * struct io_bdt - I/O buffer descriptor table
+ *
+ * @bd_tbl:             BD table's virtual address
+ * @bd_tbl_dma:         BD table's dma address
+ * @bd_valid:           num valid BD entries
+ *
+ * IO BD table
+ */
+struct io_bdt {
+       struct iscsi_bd *bd_tbl;
+       dma_addr_t bd_tbl_dma;
+       u16 bd_valid;
+};
+
+
+/**
+ * struct bnx2i_cmd - iscsi command structure
+ *
+ * @hdr:                iscsi header
+ * @conn:               bnx2i connection this command belongs to
+ * @scsi_cmd:           SCSI-ML task pointer corresponding to this iscsi cmd
+ * @sg:                 SG list
+ * @io_tbl:             buffer descriptor (BD) table
+ * @bd_tbl_dma:         buffer descriptor (BD) table's dma address
+ * @req:                iscsi command request staged by the driver and later
+ *                      copied into the SQ (see bnx2i_send_iscsi_scsicmd)
+ */
+struct bnx2i_cmd {
+       struct iscsi_hdr hdr;
+       struct bnx2i_conn *conn;
+       struct scsi_cmnd *scsi_cmd;
+       struct scatterlist *sg;
+       struct io_bdt io_tbl;
+       dma_addr_t bd_tbl_dma;
+       struct bnx2i_cmd_request req;
+};
+
+
+/**
+ * struct bnx2i_conn - iscsi connection structure
+ *
+ * @cls_conn:              pointer to iscsi cls conn
+ * @hba:                   adapter structure pointer
+ * @cmd_cleanup_cmpl:      completion used to wait for command cleanup to
+ *                         finish
+ * @is_bound:              flags whether the connection is bound to an endpoint
+ * @iscsi_conn_cid:        iscsi conn id
+ * @fw_cid:                firmware iscsi context id
+ * @poll_timer:            connection poll timer
+ * @ep:                    endpoint structure pointer
+ * @gen_pdu:               login/nopout/logout pdu resources
+ * @violation_notified:    bit mask used to track iscsi error/warning messages
+ *                         already printed out
+ *
+ * iSCSI connection structure
+ */
+struct bnx2i_conn {
+       struct iscsi_cls_conn *cls_conn;
+       struct bnx2i_hba *hba;
+       struct completion cmd_cleanup_cmpl;
+       int is_bound;
+
+       u32 iscsi_conn_cid;
+#define BNX2I_CID_RESERVED     0x5AFF
+       u32 fw_cid;
+
+       struct timer_list poll_timer;
+       /*
+        * Queue Pair (QP) related structure elements.
+        */
+       struct bnx2i_endpoint *ep;
+
+       /*
+        * Buffer for login negotiation process
+        */
+       struct generic_pdu_resc gen_pdu;
+       u64 violation_notified;
+};
+
+
+
+/**
+ * struct iscsi_cid_queue - Per adapter iscsi cid queue
+ *
+ * @cid_que_base:           queue base memory
+ * @cid_que:                queue memory pointer
+ * @cid_q_prod_idx:         producer index
+ * @cid_q_cons_idx:         consumer index
+ * @cid_q_max_idx:          max index. used to detect wrap around condition
+ * @cid_free_cnt:           queue size
+ * @conn_cid_tbl:           iscsi cid to conn structure mapping table
+ *
+ * Per adapter iSCSI CID Queue
+ */
+struct iscsi_cid_queue {
+       void *cid_que_base;
+       u32 *cid_que;
+       u32 cid_q_prod_idx;
+       u32 cid_q_cons_idx;
+       u32 cid_q_max_idx;
+       u32 cid_free_cnt;
+       struct bnx2i_conn **conn_cid_tbl;
+};
+
+/**
+ * struct bnx2i_hba - bnx2i adapter structure
+ *
+ * @link:                  list head to link elements
+ * @cnic:                  pointer to cnic device
+ * @pcidev:                pointer to pci dev
+ * @netdev:                pointer to netdev structure
+ * @regview:               mapped PCI register space
+ * @age:                   age, incremented by every recovery
+ * @cnic_dev_type:         cnic device type, 5706/5708/5709/57710
+ * @mail_queue_access:     mailbox queue access mode, applicable to 5709 only
+ * @reg_with_cnic:         indicates whether the device is registered with CNIC
+ * @adapter_state:         adapter state, UP, GOING_DOWN, LINK_DOWN
+ * @mtu_supported:         Ethernet MTU supported
+ * @shost:                 scsi host pointer
+ * @max_sqes:              SQ size
+ * @max_rqes:              RQ size
+ * @max_cqes:              CQ size
+ * @num_ccell:             number of command cells per connection
+ * @ofld_conns_active:     number of active offloaded connections
+ * @max_active_conns:      max offload connections supported by this device
+ * @cid_que:               iscsi cid queue
+ * @ep_rdwr_lock:          read / write lock to synchronize various ep lists
+ * @ep_ofld_list:          connection list for pending offload completion
+ * @ep_destroy_list:       connection list for pending destroy completion
+ * @mp_bd_tbl:             BD table to be used with middle path requests
+ * @mp_bd_dma:             DMA address of 'mp_bd_tbl' memory buffer
+ * @dummy_buffer:          Dummy buffer to be used with zero length scsicmd reqs
+ * @dummy_buf_dma:         DMA address of 'dummy_buffer' memory buffer
+ * @lock:                  lock to synchronize access to hba structure
+ * @pci_did:               PCI device ID
+ * @pci_vid:               PCI vendor ID
+ * @pci_sdid:              PCI subsystem device ID
+ * @pci_svid:              PCI subsystem vendor ID
+ * @pci_func:              PCI function number in system pci tree
+ * @pci_devno:             PCI device number in system pci tree
+ * @num_wqe_sent:          statistic counter, total wqe's sent
+ * @num_cqe_rcvd:          statistic counter, total cqe's received
+ * @num_intr_claimed:      statistic counter, total interrupts claimed
+ * @link_changed_count:    statistic counter, num of link change notifications
+ *                         received
+ * @ipaddr_changed_count:  statistic counter, num times IP address changed while
+ *                         at least one connection is offloaded
+ * @num_sess_opened:       statistic counter, total num sessions opened
+ * @num_conn_opened:       statistic counter, total num conns opened on this hba
+ * @ctx_ccell_tasks:       captures number of ccells and tasks supported by
+ *                         currently offloaded connection, used to decode
+ *                         context memory
+ *
+ * Adapter Data Structure
+ */
+struct bnx2i_hba {
+       struct list_head link;
+       struct cnic_dev *cnic;
+       struct pci_dev *pcidev;
+       struct net_device *netdev;
+       void __iomem *regview;
+
+       u32 age;
+       unsigned long cnic_dev_type;
+               #define BNX2I_NX2_DEV_5706              0x0
+               #define BNX2I_NX2_DEV_5708              0x1
+               #define BNX2I_NX2_DEV_5709              0x2
+               #define BNX2I_NX2_DEV_57710             0x3
+       u32 mail_queue_access;
+               #define BNX2I_MQ_KERNEL_MODE            0x0
+               #define BNX2I_MQ_KERNEL_BYPASS_MODE     0x1
+               #define BNX2I_MQ_BIN_MODE               0x2
+       unsigned long  reg_with_cnic;
+               #define BNX2I_CNIC_REGISTERED           1
+
+       unsigned long  adapter_state;
+               #define ADAPTER_STATE_UP                0
+               #define ADAPTER_STATE_GOING_DOWN        1
+               #define ADAPTER_STATE_LINK_DOWN         2
+               #define ADAPTER_STATE_INIT_FAILED       31
+       unsigned int mtu_supported;
+               #define BNX2I_MAX_MTU_SUPPORTED         1500
+
+       struct Scsi_Host *shost;
+
+       u32 max_sqes;
+       u32 max_rqes;
+       u32 max_cqes;
+       u32 num_ccell;
+
+       int ofld_conns_active;
+
+       int max_active_conns;
+       struct iscsi_cid_queue cid_que;
+
+       rwlock_t ep_rdwr_lock;
+       struct list_head ep_ofld_list;
+       struct list_head ep_destroy_list;
+
+       /*
+        * BD table to be used with MP (Middle Path) requests.
+        */
+       char *mp_bd_tbl;
+       dma_addr_t mp_bd_dma;
+       char *dummy_buffer;
+       dma_addr_t dummy_buf_dma;
+
+       spinlock_t lock;        /* protects hba structure access */
+       struct mutex net_dev_lock;/* sync net device access */
+
+       /*
+        * PCI related info.
+        */
+       u16 pci_did;
+       u16 pci_vid;
+       u16 pci_sdid;
+       u16 pci_svid;
+       u16 pci_func;
+       u16 pci_devno;
+
+       /*
+        * Following are a bunch of statistics useful during development
+        * and later stage for score boarding.
+        */
+       u32 num_wqe_sent;
+       u32 num_cqe_rcvd;
+       u32 num_intr_claimed;
+       u32 link_changed_count;
+       u32 ipaddr_changed_count;
+       u32 num_sess_opened;
+       u32 num_conn_opened;
+       unsigned int ctx_ccell_tasks;
+};
+
+
+/*******************************************************************************
+ *     QP [ SQ / RQ / CQ ] info.
+ ******************************************************************************/
+
+/*
+ * SQ/RQ/CQ generic structure definition
+ */
+struct sqe {
+       u8 sqe_byte[BNX2I_SQ_WQE_SIZE];
+};
+
+struct rqe {
+       u8 rqe_byte[BNX2I_RQ_WQE_SIZE];
+};
+
+struct cqe {
+       u8 cqe_byte[BNX2I_CQE_SIZE];
+};
+
+
+enum {
+#if defined(__LITTLE_ENDIAN)
+       CNIC_EVENT_COAL_INDEX   = 0x0,
+       CNIC_SEND_DOORBELL      = 0x4,
+       CNIC_EVENT_CQ_ARM       = 0x7,
+       CNIC_RECV_DOORBELL      = 0x8
+#elif defined(__BIG_ENDIAN)
+       CNIC_EVENT_COAL_INDEX   = 0x2,
+       CNIC_SEND_DOORBELL      = 0x6,
+       CNIC_EVENT_CQ_ARM       = 0x4,
+       CNIC_RECV_DOORBELL      = 0xa
+#endif
+};
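+
+/*
+ * These values are byte offsets into the per-connection doorbell window
+ * mapped at ep->qp.ctx_base; e.g. bnx2i_put_rq_buf() in bnx2i_hwi.c rings
+ * the receive doorbell with
+ * writew(ep->qp.rq_prod_idx, ep->qp.ctx_base + CNIC_RECV_DOORBELL).
+ */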
+
+
+/*
+ * CQ DB
+ */
+struct bnx2x_iscsi_cq_pend_cmpl {
+       /* CQ producer, updated by Ustorm */
+       u16 ustrom_prod;
+       /* CQ pending completion counter */
+       u16 pend_cntr;
+};
+
+
+struct bnx2i_5771x_cq_db {
+       struct bnx2x_iscsi_cq_pend_cmpl qp_pend_cmpl[BNX2X_MAX_CQS];
+       /* CQ pending completion ITT array */
+       u16 itt[BNX2X_MAX_CQS];
+       /* Cstorm CQ sequence to notify array, updated by driver */
+       u16 sqn[BNX2X_MAX_CQS];
+       u32 reserved[4];        /* 16 byte alignment */
+};
+
+
+struct bnx2i_5771x_sq_rq_db {
+       u16 prod_idx;
+       u8 reserved0[14]; /* Pad structure size to 16 bytes */
+};
+
+
+struct bnx2i_5771x_dbell_hdr {
+       u8 header;
+       /* 1 for rx doorbell, 0 for tx doorbell */
+#define B577XX_DOORBELL_HDR_RX                         (0x1<<0)
+#define B577XX_DOORBELL_HDR_RX_SHIFT                   0
+       /* 0 for normal doorbell, 1 for advertise wnd doorbell */
+#define B577XX_DOORBELL_HDR_DB_TYPE                    (0x1<<1)
+#define B577XX_DOORBELL_HDR_DB_TYPE_SHIFT              1
+       /* rdma tx only: DPM transaction size specifier (64/128/256/512B) */
+#define B577XX_DOORBELL_HDR_DPM_SIZE                   (0x3<<2)
+#define B577XX_DOORBELL_HDR_DPM_SIZE_SHIFT             2
+       /* connection type */
+#define B577XX_DOORBELL_HDR_CONN_TYPE                  (0xF<<4)
+#define B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT            4
+};
+
+struct bnx2i_5771x_dbell {
+       struct bnx2i_5771x_dbell_hdr dbell;
+       u8 pad[3];
+
+};
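+
+/*
+ * A 5771x doorbell header packs each field at its *_SHIFT position, e.g.
+ * (mirrors bnx2i_ring_577xx_doorbell() in bnx2i_hwi.c):
+ *
+ *     dbell.dbell.header = B577XX_ISCSI_CONNECTION_TYPE <<
+ *                          B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
+ */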
+
+/**
+ * struct qp_info - QP (shared queue region) attributes structure
+ *
+ * @ctx_base:           ioremapped pci register base to access doorbell register
+ *                      pertaining to this offloaded connection
+ * @sq_virt:            virtual address of send queue (SQ) region
+ * @sq_phys:            DMA address of SQ memory region
+ * @sq_mem_size:        SQ size
+ * @sq_prod_qe:         SQ producer entry pointer
+ * @sq_cons_qe:         SQ consumer entry pointer
+ * @sq_first_qe:        virtual address of first entry in SQ
+ * @sq_last_qe:         virtual address of last entry in SQ
+ * @sq_prod_idx:        SQ producer index
+ * @sq_cons_idx:        SQ consumer index
+ * @sqe_left:           number of SQ entries left
+ * @sq_pgtbl_virt:      page table describing buffers constituting SQ region
+ * @sq_pgtbl_phys:      dma address of 'sq_pgtbl_virt'
+ * @sq_pgtbl_size:      SQ page table size
+ * @cq_virt:            virtual address of completion queue (CQ) region
+ * @cq_phys:            DMA address of CQ memory region
+ * @cq_mem_size:        CQ size
+ * @cq_prod_qe:         CQ producer entry pointer
+ * @cq_cons_qe:         CQ consumer entry pointer
+ * @cq_first_qe:        virtual address of first entry in CQ
+ * @cq_last_qe:         virtual address of last entry in CQ
+ * @cq_prod_idx:        CQ producer index
+ * @cq_cons_idx:        CQ consumer index
+ * @cqe_left:           number of CQ entries left
+ * @cqe_size:           size of each CQ entry
+ * @cqe_exp_seq_sn:     next expected CQE sequence number
+ * @cq_pgtbl_virt:      page table describing buffers constituting CQ region
+ * @cq_pgtbl_phys:      dma address of 'cq_pgtbl_virt'
+ * @cq_pgtbl_size:      CQ page table size
+ * @rq_virt:            virtual address of receive queue (RQ) region
+ * @rq_phys:            DMA address of RQ memory region
+ * @rq_mem_size:        RQ size
+ * @rq_prod_qe:         RQ producer entry pointer
+ * @rq_cons_qe:         RQ consumer entry pointer
+ * @rq_first_qe:        virtual address of first entry in RQ
+ * @rq_last_qe:         virtual address of last entry in RQ
+ * @rq_prod_idx:        RQ producer index
+ * @rq_cons_idx:        RQ consumer index
+ * @rqe_left:           number of RQ entries left
+ * @rq_pgtbl_virt:      page table describing buffers constituting RQ region
+ * @rq_pgtbl_phys:      dma address of 'rq_pgtbl_virt'
+ * @rq_pgtbl_size:      RQ page table size
+ *
+ * queue pair (QP) is a per connection shared data structure which is used
+ *     to send work requests (SQ), receive completion notifications (CQ)
+ *     and receive asynchronous / scsi sense info (RQ). 'qp_info' structure
+ *     below holds queue memory, consumer/producer indexes and page table
+ *     information
+ */
+struct qp_info {
+       void __iomem *ctx_base;
+#define DPM_TRIGER_TYPE                        0x40
+
+#define BNX2I_570x_QUE_DB_SIZE         0
+#define BNX2I_5771x_QUE_DB_SIZE                16
+       struct sqe *sq_virt;
+       dma_addr_t sq_phys;
+       u32 sq_mem_size;
+
+       struct sqe *sq_prod_qe;
+       struct sqe *sq_cons_qe;
+       struct sqe *sq_first_qe;
+       struct sqe *sq_last_qe;
+       u16 sq_prod_idx;
+       u16 sq_cons_idx;
+       u32 sqe_left;
+
+       void *sq_pgtbl_virt;
+       dma_addr_t sq_pgtbl_phys;
+       u32 sq_pgtbl_size;      /* set to PAGE_SIZE for 5708 & 5709 */
+
+       struct cqe *cq_virt;
+       dma_addr_t cq_phys;
+       u32 cq_mem_size;
+
+       struct cqe *cq_prod_qe;
+       struct cqe *cq_cons_qe;
+       struct cqe *cq_first_qe;
+       struct cqe *cq_last_qe;
+       u16 cq_prod_idx;
+       u16 cq_cons_idx;
+       u32 cqe_left;
+       u32 cqe_size;
+       u32 cqe_exp_seq_sn;
+
+       void *cq_pgtbl_virt;
+       dma_addr_t cq_pgtbl_phys;
+       u32 cq_pgtbl_size;      /* set to PAGE_SIZE for 5708 & 5709 */
+
+       struct rqe *rq_virt;
+       dma_addr_t rq_phys;
+       u32 rq_mem_size;
+
+       struct rqe *rq_prod_qe;
+       struct rqe *rq_cons_qe;
+       struct rqe *rq_first_qe;
+       struct rqe *rq_last_qe;
+       u16 rq_prod_idx;
+       u16 rq_cons_idx;
+       u32 rqe_left;
+
+       void *rq_pgtbl_virt;
+       dma_addr_t rq_pgtbl_phys;
+       u32 rq_pgtbl_size;      /* set to PAGE_SIZE for 5708 & 5709 */
+};
+
+
+
+/*
+ * CID handles
+ */
+struct ep_handles {
+       u32 fw_cid;
+       u32 drv_iscsi_cid;
+       u16 pg_cid;
+       u16 rsvd;
+};
+
+
+enum {
+       EP_STATE_IDLE                   = 0x0,
+       EP_STATE_PG_OFLD_START          = 0x1,
+       EP_STATE_PG_OFLD_COMPL          = 0x2,
+       EP_STATE_OFLD_START             = 0x4,
+       EP_STATE_OFLD_COMPL             = 0x8,
+       EP_STATE_CONNECT_START          = 0x10,
+       EP_STATE_CONNECT_COMPL          = 0x20,
+       EP_STATE_ULP_UPDATE_START       = 0x40,
+       EP_STATE_ULP_UPDATE_COMPL       = 0x80,
+       EP_STATE_DISCONN_START          = 0x100,
+       EP_STATE_DISCONN_COMPL          = 0x200,
+       EP_STATE_CLEANUP_START          = 0x400,
+       EP_STATE_CLEANUP_CMPL           = 0x800,
+       EP_STATE_TCP_FIN_RCVD           = 0x1000,
+       EP_STATE_TCP_RST_RCVD           = 0x2000,
+       EP_STATE_PG_OFLD_FAILED         = 0x1000000,
+       EP_STATE_ULP_UPDATE_FAILED      = 0x2000000,
+       EP_STATE_CLEANUP_FAILED         = 0x4000000,
+       EP_STATE_OFLD_FAILED            = 0x8000000,
+       EP_STATE_CONNECT_FAILED         = 0x10000000,
+       EP_STATE_DISCONN_TIMEDOUT       = 0x20000000,
+};
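+
+/*
+ * Note: EP_STATE_* values are one-hot bits rather than sequential values,
+ * so related states can be tested with bitwise masks; failure states
+ * occupy the high bits (0x1000000 and above).
+ */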
+
+/**
+ * struct bnx2i_endpoint - representation of tcp connection in NX2 world
+ *
+ * @link:               list head to link elements
+ * @hba:                adapter to which this connection belongs
+ * @conn:               iscsi connection this EP is linked to
+ * @cm_sk:              cnic sock struct
+ * @hba_age:            age to detect if 'iscsid' issues ep_disconnect()
+ *                      after HBA reset is completed by bnx2i/cnic/bnx2
+ *                      modules
+ * @state:              tracks offload connection state machine
+ * @timestamp:          tracks the start time when the ep begins to connect
+ * @num_active_cmds:    number of outstanding commands on this endpoint
+ * @qp:                 QP information
+ * @ids:                contains chip allocated *context id* & driver assigned
+ *                      *iscsi cid*
+ * @ofld_timer:         offload timer to detect timeout
+ * @ofld_wait:          wait queue
+ *
+ * Endpoint Structure - equivalent of tcp socket structure
+ */
+struct bnx2i_endpoint {
+       struct list_head link;
+       struct bnx2i_hba *hba;
+       struct bnx2i_conn *conn;
+       struct cnic_sock *cm_sk;
+       u32 hba_age;
+       u32 state;
+       unsigned long timestamp;
+       int num_active_cmds;
+
+       struct qp_info qp;
+       struct ep_handles ids;
+               #define ep_iscsi_cid    ids.drv_iscsi_cid
+               #define ep_cid          ids.fw_cid
+               #define ep_pg_cid       ids.pg_cid
+       struct timer_list ofld_timer;
+       wait_queue_head_t ofld_wait;
+};
+
+
+
+/* Global variables */
+extern unsigned int error_mask1, error_mask2;
+extern u64 iscsi_error_mask;
+extern unsigned int en_tcp_dack;
+extern unsigned int event_coal_div;
+
+extern struct scsi_transport_template *bnx2i_scsi_xport_template;
+extern struct iscsi_transport bnx2i_iscsi_transport;
+extern struct cnic_ulp_ops bnx2i_cnic_cb;
+
+extern unsigned int sq_size;
+extern unsigned int rq_size;
+
+extern struct device_attribute *bnx2i_dev_attributes[];
+
+
+
+/*
+ * Function Prototypes
+ */
+extern void bnx2i_identify_device(struct bnx2i_hba *hba);
+extern void bnx2i_register_device(struct bnx2i_hba *hba);
+
+extern void bnx2i_ulp_init(struct cnic_dev *dev);
+extern void bnx2i_ulp_exit(struct cnic_dev *dev);
+extern void bnx2i_start(void *handle);
+extern void bnx2i_stop(void *handle);
+extern void bnx2i_reg_dev_all(void);
+extern void bnx2i_unreg_dev_all(void);
+extern struct bnx2i_hba *get_adapter_list_head(void);
+
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+                                         u16 iscsi_cid);
+
+int bnx2i_alloc_ep_pool(void);
+void bnx2i_release_ep_pool(void);
+struct bnx2i_endpoint *bnx2i_ep_ofld_list_next(struct bnx2i_hba *hba);
+struct bnx2i_endpoint *bnx2i_ep_destroy_list_next(struct bnx2i_hba *hba);
+
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic);
+
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic);
+void bnx2i_free_hba(struct bnx2i_hba *hba);
+
+void bnx2i_get_rq_buf(struct bnx2i_conn *conn, char *ptr, int len);
+void bnx2i_put_rq_buf(struct bnx2i_conn *conn, int count);
+
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd);
+
+void bnx2i_drop_session(struct iscsi_cls_session *session);
+
+extern int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba);
+extern int bnx2i_send_iscsi_login(struct bnx2i_conn *conn,
+                                 struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_tmf(struct bnx2i_conn *conn,
+                                 struct iscsi_task *mtask);
+extern int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *conn,
+                                   struct bnx2i_cmd *cmnd);
+extern int bnx2i_send_iscsi_nopout(struct bnx2i_conn *conn,
+                                  struct iscsi_task *mtask, u32 ttt,
+                                  char *datap, int data_len, int unsol);
+extern int bnx2i_send_iscsi_logout(struct bnx2i_conn *conn,
+                                  struct iscsi_task *mtask);
+extern void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba,
+                                      struct bnx2i_cmd *cmd);
+extern void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba,
+                                    struct bnx2i_endpoint *ep);
+extern void bnx2i_update_iscsi_conn(struct iscsi_conn *conn);
+extern void bnx2i_send_conn_destroy(struct bnx2i_hba *hba,
+                                   struct bnx2i_endpoint *ep);
+
+extern int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba,
+                              struct bnx2i_endpoint *ep);
+extern void bnx2i_free_qp_resc(struct bnx2i_hba *hba,
+                              struct bnx2i_endpoint *ep);
+extern void bnx2i_ep_ofld_timer(unsigned long data);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_ofld_list(
+               struct bnx2i_hba *hba, u32 iscsi_cid);
+extern struct bnx2i_endpoint *bnx2i_find_ep_in_destroy_list(
+               struct bnx2i_hba *hba, u32 iscsi_cid);
+
+extern int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep);
+extern void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action);
+
+/* Debug related function prototypes */
+extern void bnx2i_print_pend_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_active_cmd_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_xmit_pdu_queue(struct bnx2i_conn *conn);
+extern void bnx2i_print_recv_state(struct bnx2i_conn *conn);
+
+#endif
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
new file mode 100644 (file)
index 0000000..906cef5
--- /dev/null
@@ -0,0 +1,2405 @@
+/* bnx2i_hwi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+/**
+ * bnx2i_get_cid_num - get cid from ep
+ * @ep:        endpoint pointer
+ *
+ * 57710 devices use the firmware CID as-is; 5706/5708/5709 devices derive
+ *     the context number via GET_CID_NUM()
+ */
+static u32 bnx2i_get_cid_num(struct bnx2i_endpoint *ep)
+{
+       u32 cid;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               cid = ep->ep_cid;
+       else
+               cid = GET_CID_NUM(ep->ep_cid);
+       return cid;
+}
+
+
+/**
+ * bnx2i_adjust_qp_size - Adjust SQ/RQ/CQ sizes to device limits
+ * @hba:               Adapter for which adjustments are to be made
+ *
+ * 5706/5708/5709 devices require power-of-2 queue sizes; all device types
+ *     require each queue to span an integral number of pages
+ */
+static void bnx2i_adjust_qp_size(struct bnx2i_hba *hba)
+{
+       u32 num_elements_per_pg;
+
+       if (test_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type) ||
+           test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type) ||
+           test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+               if (!is_power_of_2(hba->max_sqes))
+                       hba->max_sqes = rounddown_pow_of_two(hba->max_sqes);
+
+               if (!is_power_of_2(hba->max_rqes))
+                       hba->max_rqes = rounddown_pow_of_two(hba->max_rqes);
+       }
+
+       /* Adjust each queue size if the user selection does not
+        * yield an integral number of pages worth of queue entries
+        */
+       /* adjust SQ */
+       num_elements_per_pg = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+       if (hba->max_sqes < num_elements_per_pg)
+               hba->max_sqes = num_elements_per_pg;
+       else if (hba->max_sqes % num_elements_per_pg)
+               hba->max_sqes = (hba->max_sqes + num_elements_per_pg - 1) &
+                                ~(num_elements_per_pg - 1);
+
+       /* adjust CQ */
+       num_elements_per_pg = PAGE_SIZE / BNX2I_CQE_SIZE;
+       if (hba->max_cqes < num_elements_per_pg)
+               hba->max_cqes = num_elements_per_pg;
+       else if (hba->max_cqes % num_elements_per_pg)
+               hba->max_cqes = (hba->max_cqes + num_elements_per_pg - 1) &
+                                ~(num_elements_per_pg - 1);
+
+       /* adjust RQ */
+       num_elements_per_pg = PAGE_SIZE / BNX2I_RQ_WQE_SIZE;
+       if (hba->max_rqes < num_elements_per_pg)
+               hba->max_rqes = num_elements_per_pg;
+       else if (hba->max_rqes % num_elements_per_pg)
+               hba->max_rqes = (hba->max_rqes + num_elements_per_pg - 1) &
+                                ~(num_elements_per_pg - 1);
+}
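+
+/*
+ * Worked example (assuming PAGE_SIZE == 4096): the SQ holds
+ * 4096 / BNX2I_SQ_WQE_SIZE = 64 entries per page, so a requested
+ * max_sqes of 100 is rounded up to (100 + 63) & ~63 = 128.
+ */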
+
+
+/**
+ * bnx2i_get_link_state - get network interface link state
+ * @hba:       adapter instance pointer
+ *
+ * updates adapter structure flag based on netdev state
+ */
+static void bnx2i_get_link_state(struct bnx2i_hba *hba)
+{
+       if (test_bit(__LINK_STATE_NOCARRIER, &hba->netdev->state))
+               set_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+       else
+               clear_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_iscsi_license_error - displays iscsi license related error message
+ * @hba:               adapter instance pointer
+ * @error_code:                error classification
+ *
+ * Puts out an error log when driver is unable to offload iscsi connection
+ *     due to license restrictions
+ */
+static void bnx2i_iscsi_license_error(struct bnx2i_hba *hba, u32 error_code)
+{
+       if (error_code == ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED)
+               /* iSCSI offload not supported on this device */
+               printk(KERN_ERR "bnx2i: iSCSI not supported, dev=%s\n",
+                               hba->netdev->name);
+       if (error_code == ISCSI_KCQE_COMPLETION_STATUS_LOM_ISCSI_NOT_ENABLED)
+               /* iSCSI offload not supported on this LOM device */
+               printk(KERN_ERR "bnx2i: LOM is not enable to "
+                               "offload iSCSI connections, dev=%s\n",
+                               hba->netdev->name);
+       set_bit(ADAPTER_STATE_INIT_FAILED, &hba->adapter_state);
+}
+
+
+/**
+ * bnx2i_arm_cq_event_coalescing - arms CQ to enable EQ notification
+ * @ep:                endpoint (transport identifier) structure
+ * @action:    action, ARM or DISARM. For now only ARM_CQE is used
+ *
+ * Arming the CQ enables the chip to generate global EQ events in order to
+ *     interrupt the driver. An EQ event is generated when the CQ index is
+ *     hit, or when at least one CQE is outstanding and the on-chip timer
+ *     expires
+ */
+void bnx2i_arm_cq_event_coalescing(struct bnx2i_endpoint *ep, u8 action)
+{
+       struct bnx2i_5771x_cq_db *cq_db;
+       u16 cq_index;
+
+       if (!test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               return;
+
+       if (action == CNIC_ARM_CQE) {
+               cq_index = ep->qp.cqe_exp_seq_sn +
+                          ep->num_active_cmds / event_coal_div;
+               cq_index %= (ep->qp.cqe_size * 2 + 1);
+               if (!cq_index) {
+                       cq_index = 1;
+                       cq_db = (struct bnx2i_5771x_cq_db *)
+                                       ep->qp.cq_pgtbl_virt;
+                       cq_db->sqn[0] = cq_index;
+               }
+       }
+}
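+
+/*
+ * Coalescing sketch (illustrative): with event_coal_div == 1 and four
+ * active commands, the next EQ event is requested at cqe_exp_seq_sn + 4,
+ * wrapping modulo (cqe_size * 2 + 1) as computed above.
+ */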
+
+
+/**
+ * bnx2i_get_rq_buf - copy RQ buffer contents to driver buffer
+ * @conn:              iscsi connection on which RQ event occurred
+ * @ptr:               driver buffer to which RQ buffer contents are to
+ *                     be copied
+ * @len:               length of valid data inside RQ buf
+ *
+ * Copies RQ buffer contents from shared (DMA'able) memory region to
+ *     driver buffer. RQ is used to DMA unsolicited iscsi pdu's and
+ *     scsi sense info
+ */
+void bnx2i_get_rq_buf(struct bnx2i_conn *bnx2i_conn, char *ptr, int len)
+{
+       if (!bnx2i_conn->ep->qp.rqe_left)
+               return;
+
+       bnx2i_conn->ep->qp.rqe_left--;
+       memcpy(ptr, (u8 *) bnx2i_conn->ep->qp.rq_cons_qe, len);
+       if (bnx2i_conn->ep->qp.rq_cons_qe == bnx2i_conn->ep->qp.rq_last_qe) {
+               bnx2i_conn->ep->qp.rq_cons_qe = bnx2i_conn->ep->qp.rq_first_qe;
+               bnx2i_conn->ep->qp.rq_cons_idx = 0;
+       } else {
+               bnx2i_conn->ep->qp.rq_cons_qe++;
+               bnx2i_conn->ep->qp.rq_cons_idx++;
+       }
+}
+
+
+static void bnx2i_ring_577xx_doorbell(struct bnx2i_conn *conn)
+{
+       struct bnx2i_5771x_dbell dbell;
+       u32 msg;
+
+       memset(&dbell, 0, sizeof(dbell));
+       dbell.dbell.header = (B577XX_ISCSI_CONNECTION_TYPE <<
+                             B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT);
+       msg = *((u32 *)&dbell);
+       /* TODO : get doorbell register mapping */
+       writel(cpu_to_le32(msg), conn->ep->qp.ctx_base);
+}
+
+
+/**
+ * bnx2i_put_rq_buf - replenish RQ buffers and ring the chip doorbell if needed
+ * @conn:      iscsi connection on which event to post
+ * @count:     number of RQ buffers being posted to the chip
+ *
+ * No need to ring hardware doorbell for 57710 family of devices
+ */
+void bnx2i_put_rq_buf(struct bnx2i_conn *bnx2i_conn, int count)
+{
+       struct bnx2i_5771x_sq_rq_db *rq_db;
+       u16 hi_bit = (bnx2i_conn->ep->qp.rq_prod_idx & 0x8000);
+       struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+       ep->qp.rqe_left += count;
+       ep->qp.rq_prod_idx &= 0x7FFF;
+       ep->qp.rq_prod_idx += count;
+
+       if (ep->qp.rq_prod_idx > bnx2i_conn->hba->max_rqes) {
+               ep->qp.rq_prod_idx %= bnx2i_conn->hba->max_rqes;
+               if (!hi_bit)
+                       ep->qp.rq_prod_idx |= 0x8000;
+       } else
+               ep->qp.rq_prod_idx |= hi_bit;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+               rq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.rq_pgtbl_virt;
+               rq_db->prod_idx = ep->qp.rq_prod_idx;
+               /* no need to ring hardware doorbell for 57710 */
+       } else {
+               writew(ep->qp.rq_prod_idx,
+                      ep->qp.ctx_base + CNIC_RECV_DOORBELL);
+       }
+       mmiowb();
+}
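+
+/*
+ * The RQ producer index is kept as a 15-bit counter; bit 15 serves as a
+ * phase bit that is toggled each time the index wraps past max_rqes, a
+ * common scheme for disambiguating a full ring from an empty one.
+ */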
+
+
+/**
+ * bnx2i_ring_sq_dbell - Ring SQ doorbell to wake-up the processing engine
+ * @conn:              iscsi connection to which new SQ entries belong
+ * @count:             number of SQ WQEs to post
+ *
+ * SQ DB is updated in host memory and TX Doorbell is rung for 57710 family
+ *     of devices. For 5706/5708/5709 new SQ WQE count is written into the
+ *     doorbell register
+ */
+static void bnx2i_ring_sq_dbell(struct bnx2i_conn *bnx2i_conn, int count)
+{
+       struct bnx2i_5771x_sq_rq_db *sq_db;
+       struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+
+       ep->num_active_cmds++;
+       wmb();  /* flush SQ WQE memory before the doorbell is rung */
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+               sq_db = (struct bnx2i_5771x_sq_rq_db *) ep->qp.sq_pgtbl_virt;
+               sq_db->prod_idx = ep->qp.sq_prod_idx;
+               bnx2i_ring_577xx_doorbell(bnx2i_conn);
+       } else
+               writew(count, ep->qp.ctx_base + CNIC_SEND_DOORBELL);
+
+       mmiowb(); /* flush posted PCI writes */
+}
+
+
+/**
+ * bnx2i_ring_dbell_update_sq_params - update SQ driver parameters
+ * @conn:      iscsi connection to which new SQ entries belong
+ * @count:     number of SQ WQEs to post
+ *
+ * this routine will update SQ driver parameters and ring the doorbell
+ */
+static void bnx2i_ring_dbell_update_sq_params(struct bnx2i_conn *bnx2i_conn,
+                                             int count)
+{
+       int tmp_cnt;
+
+       if (count == 1) {
+               if (bnx2i_conn->ep->qp.sq_prod_qe ==
+                   bnx2i_conn->ep->qp.sq_last_qe)
+                       bnx2i_conn->ep->qp.sq_prod_qe =
+                                               bnx2i_conn->ep->qp.sq_first_qe;
+               else
+                       bnx2i_conn->ep->qp.sq_prod_qe++;
+       } else {
+               if ((bnx2i_conn->ep->qp.sq_prod_qe + count) <=
+                   bnx2i_conn->ep->qp.sq_last_qe)
+                       bnx2i_conn->ep->qp.sq_prod_qe += count;
+               else {
+                       tmp_cnt = bnx2i_conn->ep->qp.sq_last_qe -
+                               bnx2i_conn->ep->qp.sq_prod_qe;
+                       bnx2i_conn->ep->qp.sq_prod_qe =
+                               &bnx2i_conn->ep->qp.sq_first_qe[count -
+                                                               (tmp_cnt + 1)];
+               }
+       }
+       bnx2i_conn->ep->qp.sq_prod_idx += count;
+       /* Ring the doorbell */
+       bnx2i_ring_sq_dbell(bnx2i_conn, bnx2i_conn->ep->qp.sq_prod_idx);
+}
+
+
+/**
+ * bnx2i_send_iscsi_login - post iSCSI login request MP WQE to hardware
+ * @conn:      iscsi connection
+ * @cmd:       driver command structure which is requesting
+ *             a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI Login request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_login(struct bnx2i_conn *bnx2i_conn,
+                          struct iscsi_task *task)
+{
+       struct bnx2i_cmd *bnx2i_cmd;
+       struct bnx2i_login_request *login_wqe;
+       struct iscsi_login *login_hdr;
+       u32 dword;
+
+       bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+       login_hdr = (struct iscsi_login *)task->hdr;
+       login_wqe = (struct bnx2i_login_request *)
+                                               bnx2i_conn->ep->qp.sq_prod_qe;
+
+       login_wqe->op_code = login_hdr->opcode;
+       login_wqe->op_attr = login_hdr->flags;
+       login_wqe->version_max = login_hdr->max_version;
+       login_wqe->version_min = login_hdr->min_version;
+       login_wqe->data_length = ntoh24(login_hdr->dlength);
+       login_wqe->isid_lo = *((u32 *) login_hdr->isid);
+       login_wqe->isid_hi = *((u16 *) login_hdr->isid + 2);
+       login_wqe->tsih = login_hdr->tsih;
+       login_wqe->itt = task->itt |
+               (ISCSI_TASK_TYPE_MPATH << ISCSI_LOGIN_REQUEST_TYPE_SHIFT);
+       login_wqe->cid = login_hdr->cid;
+
+       login_wqe->cmd_sn = be32_to_cpu(login_hdr->cmdsn);
+       login_wqe->exp_stat_sn = be32_to_cpu(login_hdr->exp_statsn);
+
+       login_wqe->resp_bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_bd_dma;
+       login_wqe->resp_bd_list_addr_hi =
+               (u32) ((u64) bnx2i_conn->gen_pdu.resp_bd_dma >> 32);
+
+       dword = ((1 << ISCSI_LOGIN_REQUEST_NUM_RESP_BDS_SHIFT) |
+                (bnx2i_conn->gen_pdu.resp_buf_size <<
+                 ISCSI_LOGIN_REQUEST_RESP_BUFFER_LENGTH_SHIFT));
+       login_wqe->resp_buffer = dword;
+       login_wqe->flags = 0;
+       login_wqe->bd_list_addr_lo = (u32) bnx2i_conn->gen_pdu.req_bd_dma;
+       login_wqe->bd_list_addr_hi =
+               (u32) ((u64) bnx2i_conn->gen_pdu.req_bd_dma >> 32);
+       login_wqe->num_bds = 1;
+       login_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+       bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+       return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_tmf - post iSCSI task management request MP WQE to hardware
+ * @conn:      iscsi connection
+ * @mtask:     driver command structure which is requesting
+ *             a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI task management function request WQE to
+ *     CNIC firmware
+ */
+int bnx2i_send_iscsi_tmf(struct bnx2i_conn *bnx2i_conn,
+                        struct iscsi_task *mtask)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_tm *tmfabort_hdr;
+       struct scsi_cmnd *ref_sc;
+       struct iscsi_task *ctask;
+       struct bnx2i_cmd *bnx2i_cmd;
+       struct bnx2i_tmf_request *tmfabort_wqe;
+       u32 dword;
+
+       bnx2i_cmd = (struct bnx2i_cmd *)mtask->dd_data;
+       tmfabort_hdr = (struct iscsi_tm *)mtask->hdr;
+       tmfabort_wqe = (struct bnx2i_tmf_request *)
+                                               bnx2i_conn->ep->qp.sq_prod_qe;
+
+       tmfabort_wqe->op_code = tmfabort_hdr->opcode;
+       tmfabort_wqe->op_attr = 0;
+       tmfabort_wqe->op_attr =
+               ISCSI_TMF_REQUEST_ALWAYS_ONE | ISCSI_TM_FUNC_ABORT_TASK;
+       tmfabort_wqe->lun[0] = be32_to_cpu(tmfabort_hdr->lun[0]);
+       tmfabort_wqe->lun[1] = be32_to_cpu(tmfabort_hdr->lun[1]);
+
+       tmfabort_wqe->itt = (mtask->itt | (ISCSI_TASK_TYPE_MPATH << 14));
+       tmfabort_wqe->reserved2 = 0;
+       tmfabort_wqe->cmd_sn = be32_to_cpu(tmfabort_hdr->cmdsn);
+
+       ctask = iscsi_itt_to_task(conn, tmfabort_hdr->rtt);
+       if (!ctask || !ctask->sc)
+               /*
+                * the iscsi layer must have completed the cmd while this
+                * was starting up.
+                */
+               return 0;
+       ref_sc = ctask->sc;
+
+       if (ref_sc->sc_data_direction == DMA_TO_DEVICE)
+               dword = (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+       else
+               dword = (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+       tmfabort_wqe->ref_itt = (dword | tmfabort_hdr->rtt);
+       tmfabort_wqe->ref_cmd_sn = be32_to_cpu(tmfabort_hdr->refcmdsn);
+
+       tmfabort_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+       tmfabort_wqe->bd_list_addr_hi = (u32)
+                               ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+       tmfabort_wqe->num_bds = 1;
+       tmfabort_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+       bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+       return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_scsicmd - post iSCSI scsicmd request WQE to hardware
+ * @conn:      iscsi connection
+ * @cmd:       driver command structure which is requesting
+ *             a WQE to be sent to the chip for further processing
+ *
+ * prepare and post an iSCSI SCSI-CMD request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_scsicmd(struct bnx2i_conn *bnx2i_conn,
+                            struct bnx2i_cmd *cmd)
+{
+       struct bnx2i_cmd_request *scsi_cmd_wqe;
+
+       scsi_cmd_wqe = (struct bnx2i_cmd_request *)
+                                               bnx2i_conn->ep->qp.sq_prod_qe;
+       memcpy(scsi_cmd_wqe, &cmd->req, sizeof(struct bnx2i_cmd_request));
+       scsi_cmd_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+       bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+       return 0;
+}
+
+/**
+ * bnx2i_send_iscsi_nopout - post iSCSI NOPOUT request WQE to hardware
+ * @conn:              iscsi connection
+ * @cmd:               driver command structure which is requesting
+ *                     a WQE to be sent to the chip for further processing
+ * @ttt:               TTT to be used when building pdu header
+ * @datap:             payload buffer pointer
+ * @data_len:          payload data length
+ * @unsol:             indicates whether nopout pdu is unsolicited or sent
+ *                     in response to target's NOPIN w/ TTT != FFFFFFFF
+ *
+ * prepare and post a nopout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_nopout(struct bnx2i_conn *bnx2i_conn,
+                           struct iscsi_task *task, u32 ttt,
+                           char *datap, int data_len, int unsol)
+{
+       struct bnx2i_endpoint *ep = bnx2i_conn->ep;
+       struct bnx2i_cmd *bnx2i_cmd;
+       struct bnx2i_nop_out_request *nopout_wqe;
+       struct iscsi_nopout *nopout_hdr;
+
+       bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+       nopout_hdr = (struct iscsi_nopout *)task->hdr;
+       nopout_wqe = (struct bnx2i_nop_out_request *)ep->qp.sq_prod_qe;
+       nopout_wqe->op_code = nopout_hdr->opcode;
+       nopout_wqe->op_attr = ISCSI_FLAG_CMD_FINAL;
+       memcpy(nopout_wqe->lun, nopout_hdr->lun, 8);
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+               u32 tmp = nopout_hdr->lun[0];
+               /* 57710 requires LUN field to be swapped */
+               nopout_hdr->lun[0] = nopout_hdr->lun[1];
+               nopout_hdr->lun[1] = tmp;
+       }
+
+       nopout_wqe->itt = ((u16)task->itt |
+                          (ISCSI_TASK_TYPE_MPATH <<
+                           ISCSI_TMF_REQUEST_TYPE_SHIFT));
+       nopout_wqe->ttt = ttt;
+       nopout_wqe->flags = 0;
+       if (!unsol)
+               nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+       else if (nopout_hdr->itt == RESERVED_ITT)
+               nopout_wqe->flags = ISCSI_NOP_OUT_REQUEST_LOCAL_COMPLETION;
+
+       nopout_wqe->cmd_sn = be32_to_cpu(nopout_hdr->cmdsn);
+       nopout_wqe->data_length = data_len;
+       if (data_len) {
+               /* handle payload data, not required in first release */
+               printk(KERN_ALERT "NOPOUT: WARNING!! payload len != 0\n");
+       } else {
+               nopout_wqe->bd_list_addr_lo = (u32)
+                                       bnx2i_conn->hba->mp_bd_dma;
+               nopout_wqe->bd_list_addr_hi =
+                       (u32) ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+               nopout_wqe->num_bds = 1;
+       }
+       nopout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+       bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+       return 0;
+}
+
+
+/**
+ * bnx2i_send_iscsi_logout - post iSCSI logout request WQE to hardware
+ * @conn:      iscsi connection
+ * @cmd:       driver command structure which is requesting
+ *             a WQE to be sent to the chip for further processing
+ *
+ * prepare and post logout request WQE to CNIC firmware
+ */
+int bnx2i_send_iscsi_logout(struct bnx2i_conn *bnx2i_conn,
+                           struct iscsi_task *task)
+{
+       struct bnx2i_cmd *bnx2i_cmd;
+       struct bnx2i_logout_request *logout_wqe;
+       struct iscsi_logout *logout_hdr;
+
+       bnx2i_cmd = (struct bnx2i_cmd *)task->dd_data;
+       logout_hdr = (struct iscsi_logout *)task->hdr;
+
+       logout_wqe = (struct bnx2i_logout_request *)
+                                               bnx2i_conn->ep->qp.sq_prod_qe;
+       memset(logout_wqe, 0x00, sizeof(struct bnx2i_logout_request));
+
+       logout_wqe->op_code = logout_hdr->opcode;
+       logout_wqe->cmd_sn = be32_to_cpu(logout_hdr->cmdsn);
+       logout_wqe->op_attr =
+                       logout_hdr->flags | ISCSI_LOGOUT_REQUEST_ALWAYS_ONE;
+       logout_wqe->itt = ((u16)task->itt |
+                          (ISCSI_TASK_TYPE_MPATH <<
+                           ISCSI_LOGOUT_REQUEST_TYPE_SHIFT));
+       logout_wqe->data_length = 0;
+       logout_wqe->cid = 0;
+
+       logout_wqe->bd_list_addr_lo = (u32) bnx2i_conn->hba->mp_bd_dma;
+       logout_wqe->bd_list_addr_hi = (u32)
+                               ((u64) bnx2i_conn->hba->mp_bd_dma >> 32);
+       logout_wqe->num_bds = 1;
+       logout_wqe->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+       bnx2i_ring_dbell_update_sq_params(bnx2i_conn, 1);
+       return 0;
+}
+
+
+/**
+ * bnx2i_update_iscsi_conn - posts iSCSI Conn Update KWQE to hardware
+ * @conn:      iscsi connection which requires iscsi parameter update
+ *
+ * sends down iSCSI Conn Update request to move iSCSI conn to FFP
+ */
+void bnx2i_update_iscsi_conn(struct iscsi_conn *conn)
+{
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       struct bnx2i_hba *hba = bnx2i_conn->hba;
+       struct kwqe *kwqe_arr[2];
+       struct iscsi_kwqe_conn_update *update_wqe;
+       struct iscsi_kwqe_conn_update conn_update_kwqe;
+
+       update_wqe = &conn_update_kwqe;
+
+       update_wqe->hdr.op_code = ISCSI_KWQE_OPCODE_UPDATE_CONN;
+       update_wqe->hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       /* 5771x requires conn context id to be passed as is */
+       if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_conn->ep->hba->cnic_dev_type))
+               update_wqe->context_id = bnx2i_conn->ep->ep_cid;
+       else
+               update_wqe->context_id = (bnx2i_conn->ep->ep_cid >> 7);
+       update_wqe->conn_flags = 0;
+       if (conn->hdrdgst_en)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_HEADER_DIGEST;
+       if (conn->datadgst_en)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_DATA_DIGEST;
+       if (conn->session->initial_r2t_en)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_INITIAL_R2T;
+       if (conn->session->imm_data_en)
+               update_wqe->conn_flags |= ISCSI_KWQE_CONN_UPDATE_IMMEDIATE_DATA;
+
+       update_wqe->max_send_pdu_length = conn->max_xmit_dlength;
+       update_wqe->max_recv_pdu_length = conn->max_recv_dlength;
+       update_wqe->first_burst_length = conn->session->first_burst;
+       update_wqe->max_burst_length = conn->session->max_burst;
+       update_wqe->exp_stat_sn = conn->exp_statsn;
+       update_wqe->max_outstanding_r2ts = conn->session->max_r2t;
+       update_wqe->session_error_recovery_level = conn->session->erl;
+       iscsi_conn_printk(KERN_ALERT, conn,
+                         "bnx2i: conn update - MBL 0x%x FBL 0x%x"
+                         "MRDSL_I 0x%x MRDSL_T 0x%x \n",
+                         update_wqe->max_burst_length,
+                         update_wqe->first_burst_length,
+                         update_wqe->max_recv_pdu_length,
+                         update_wqe->max_send_pdu_length);
+
+       kwqe_arr[0] = (struct kwqe *) update_wqe;
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_ep_ofld_timer - connection offload / destroy request timeout handler
+ * @data:      endpoint (transport handle) structure pointer
+ *
+ * routine to handle connection offload/destroy request timeout
+ */
+void bnx2i_ep_ofld_timer(unsigned long data)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) data;
+
+       if (ep->state == EP_STATE_OFLD_START) {
+               printk(KERN_ALERT "ofld_timer: CONN_OFLD timeout\n");
+               ep->state = EP_STATE_OFLD_FAILED;
+       } else if (ep->state == EP_STATE_DISCONN_START) {
+               printk(KERN_ALERT "ofld_timer: CONN_DISCON timeout\n");
+               ep->state = EP_STATE_DISCONN_TIMEDOUT;
+       } else if (ep->state == EP_STATE_CLEANUP_START) {
+               printk(KERN_ALERT "ofld_timer: CONN_CLEANUP timeout\n");
+               ep->state = EP_STATE_CLEANUP_FAILED;
+       }
+
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+static int bnx2i_power_of2(u32 val)
+{
+       u32 power = 0;
+       if (val & (val - 1))
+               return power;
+       val--;
+       while (val) {
+               val = val >> 1;
+               power++;
+       }
+       return power;
+}
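+
+/*
+ * Examples: bnx2i_power_of2(64) returns 6, while bnx2i_power_of2(100)
+ * returns 0 because 100 is not a power of two.
+ */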
+
+
+/**
+ * bnx2i_send_cmd_cleanup_req - send iscsi cmd context clean-up request
+ * @hba:       adapter structure pointer
+ * @cmd:       driver command structure which is requesting
+ *             a WQE to be sent to the chip for further processing
+ *
+ * prepares and posts an iSCSI command cleanup request WQE to CNIC firmware
+ */
+void bnx2i_send_cmd_cleanup_req(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+       struct bnx2i_cleanup_request *cmd_cleanup;
+
+       cmd_cleanup =
+               (struct bnx2i_cleanup_request *)cmd->conn->ep->qp.sq_prod_qe;
+       memset(cmd_cleanup, 0x00, sizeof(struct bnx2i_cleanup_request));
+
+       cmd_cleanup->op_code = ISCSI_OPCODE_CLEANUP_REQUEST;
+       cmd_cleanup->itt = cmd->req.itt;
+       cmd_cleanup->cq_index = 0; /* CQ# used for completion, 5771x only */
+
+       bnx2i_ring_dbell_update_sq_params(cmd->conn, 1);
+}
+
+
+/**
+ * bnx2i_send_conn_destroy - initiates iscsi connection teardown process
+ * @hba:       adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts a CONN_DESTROY KWQE to initiate
+ *     iscsi connection context clean-up process
+ */
+void bnx2i_send_conn_destroy(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       struct kwqe *kwqe_arr[2];
+       struct iscsi_kwqe_conn_destroy conn_cleanup;
+
+       memset(&conn_cleanup, 0x00, sizeof(struct iscsi_kwqe_conn_destroy));
+
+       conn_cleanup.hdr.op_code = ISCSI_KWQE_OPCODE_DESTROY_CONN;
+       conn_cleanup.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+       /* 5771x requires conn context id to be passed as is */
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               conn_cleanup.context_id = ep->ep_cid;
+       else
+               conn_cleanup.context_id = (ep->ep_cid >> 7);
+
+       conn_cleanup.reserved0 = (u16)ep->ep_iscsi_cid;
+
+       kwqe_arr[0] = (struct kwqe *) &conn_cleanup;
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 1);
+}
+
+
+/**
+ * bnx2i_570x_send_conn_ofld_req - initiates iscsi conn context setup process
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * 5706/5708/5709 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_570x_send_conn_ofld_req(struct bnx2i_hba *hba,
+                                         struct bnx2i_endpoint *ep)
+{
+       struct kwqe *kwqe_arr[2];
+       struct iscsi_kwqe_conn_offload1 ofld_req1;
+       struct iscsi_kwqe_conn_offload2 ofld_req2;
+       dma_addr_t dma_addr;
+       int num_kwqes = 2;
+       u32 *ptbl;
+
+       ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+       ofld_req1.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+       dma_addr = ep->qp.sq_pgtbl_phys;
+       ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       dma_addr = ep->qp.cq_pgtbl_phys;
+       ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+       ofld_req2.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       dma_addr = ep->qp.rq_pgtbl_phys;
+       ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+
+       ofld_req2.sq_first_pte.hi = *ptbl++;
+       ofld_req2.sq_first_pte.lo = *ptbl;
+
+       ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+       ofld_req2.cq_first_pte.hi = *ptbl++;
+       ofld_req2.cq_first_pte.lo = *ptbl;
+
+       kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+       kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+       ofld_req2.num_additional_wqes = 0;
+
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+
+/**
+ * bnx2i_5771x_send_conn_ofld_req - initiates iscsi connection context creation
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * 57710 specific - prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+static void bnx2i_5771x_send_conn_ofld_req(struct bnx2i_hba *hba,
+                                          struct bnx2i_endpoint *ep)
+{
+       struct kwqe *kwqe_arr[5];
+       struct iscsi_kwqe_conn_offload1 ofld_req1;
+       struct iscsi_kwqe_conn_offload2 ofld_req2;
+       struct iscsi_kwqe_conn_offload3 ofld_req3[1];
+       dma_addr_t dma_addr;
+       int num_kwqes = 2;
+       u32 *ptbl;
+
+       ofld_req1.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN1;
+       ofld_req1.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       ofld_req1.iscsi_conn_id = (u16) ep->ep_iscsi_cid;
+
+       dma_addr = ep->qp.sq_pgtbl_phys + ISCSI_SQ_DB_SIZE;
+       ofld_req1.sq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.sq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       dma_addr = ep->qp.cq_pgtbl_phys + ISCSI_CQ_DB_SIZE;
+       ofld_req1.cq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req1.cq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ofld_req2.hdr.op_code = ISCSI_KWQE_OPCODE_OFFLOAD_CONN2;
+       ofld_req2.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       dma_addr = ep->qp.rq_pgtbl_phys + ISCSI_RQ_DB_SIZE;
+       ofld_req2.rq_page_table_addr_lo = (u32) dma_addr;
+       ofld_req2.rq_page_table_addr_hi = (u32) ((u64) dma_addr >> 32);
+
+       ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+       ofld_req2.sq_first_pte.hi = *ptbl++;
+       ofld_req2.sq_first_pte.lo = *ptbl;
+
+       ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+       ofld_req2.cq_first_pte.hi = *ptbl++;
+       ofld_req2.cq_first_pte.lo = *ptbl;
+
+       kwqe_arr[0] = (struct kwqe *) &ofld_req1;
+       kwqe_arr[1] = (struct kwqe *) &ofld_req2;
+
+       ofld_req2.num_additional_wqes = 1;
+       memset(ofld_req3, 0x00, sizeof(ofld_req3[0]));
+       ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+       ofld_req3[0].qp_first_pte[0].hi = *ptbl++;
+       ofld_req3[0].qp_first_pte[0].lo = *ptbl;
+
+       kwqe_arr[2] = (struct kwqe *) ofld_req3;
+       /* needed if we decide to go with multiple KCQEs per conn */
+       num_kwqes += 1;
+
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
+}
+
+/**
+ * bnx2i_send_conn_ofld_req - initiates iscsi connection context setup process
+ * @hba:               adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * this routine prepares and posts CONN_OFLD_REQ1/2 KWQE
+ */
+void bnx2i_send_conn_ofld_req(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+               bnx2i_5771x_send_conn_ofld_req(hba, ep);
+       else
+               bnx2i_570x_send_conn_ofld_req(hba, ep);
+}
+
+
+/**
+ * setup_qp_page_tables - iscsi QP page table setup function
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * Sets up page tables for SQ/RQ/CQ. 1G/sec (5706/5708/5709) devices require
+ *     64-bit addresses in big endian format, whereas 10G/sec (57710) requires
+ *     page table entries in little endian format
+ */
+static void setup_qp_page_tables(struct bnx2i_endpoint *ep)
+{
+       int num_pages;
+       u32 *ptbl;
+       dma_addr_t page;
+       int cnic_dev_10g;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type))
+               cnic_dev_10g = 1;
+       else
+               cnic_dev_10g = 0;
+
+       /* SQ page table */
+       memset(ep->qp.sq_pgtbl_virt, 0, ep->qp.sq_pgtbl_size);
+       num_pages = ep->qp.sq_mem_size / PAGE_SIZE;
+       page = ep->qp.sq_phys;
+
+       if (cnic_dev_10g)
+               ptbl = (u32 *)((u8 *)ep->qp.sq_pgtbl_virt + ISCSI_SQ_DB_SIZE);
+       else
+               ptbl = (u32 *) ep->qp.sq_pgtbl_virt;
+       while (num_pages--) {
+               if (cnic_dev_10g) {
+                       /* PTE is written in little endian format for 57710 */
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       page += PAGE_SIZE;
+               } else {
+                       /* PTE is written in big endian format for
+                        * 5706/5708/5709 devices */
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       page += PAGE_SIZE;
+               }
+       }
+
+       /* RQ page table */
+       memset(ep->qp.rq_pgtbl_virt, 0, ep->qp.rq_pgtbl_size);
+       num_pages = ep->qp.rq_mem_size / PAGE_SIZE;
+       page = ep->qp.rq_phys;
+
+       if (cnic_dev_10g)
+               ptbl = (u32 *)((u8 *)ep->qp.rq_pgtbl_virt + ISCSI_RQ_DB_SIZE);
+       else
+               ptbl = (u32 *) ep->qp.rq_pgtbl_virt;
+       while (num_pages--) {
+               if (cnic_dev_10g) {
+                       /* PTE is written in little endian format for 57710 */
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       page += PAGE_SIZE;
+               } else {
+                       /* PTE is written in big endian format for
+                        * 5706/5708/5709 devices */
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       page += PAGE_SIZE;
+               }
+       }
+
+       /* CQ page table */
+       memset(ep->qp.cq_pgtbl_virt, 0, ep->qp.cq_pgtbl_size);
+       num_pages = ep->qp.cq_mem_size / PAGE_SIZE;
+       page = ep->qp.cq_phys;
+
+       if (cnic_dev_10g)
+               ptbl = (u32 *)((u8 *)ep->qp.cq_pgtbl_virt + ISCSI_CQ_DB_SIZE);
+       else
+               ptbl = (u32 *) ep->qp.cq_pgtbl_virt;
+       while (num_pages--) {
+               if (cnic_dev_10g) {
+                       /* PTE is written in little endian format for 57710 */
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       page += PAGE_SIZE;
+               } else {
+                       /* PTE is written in big endian format for
+                        * 5706/5708/5709 devices */
+                       *ptbl = (u32) ((u64) page >> 32);
+                       ptbl++;
+                       *ptbl = (u32) page;
+                       ptbl++;
+                       page += PAGE_SIZE;
+               }
+       }
+}
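+
+/*
+ * Illustrative sketch, not part of the driver flow above: the two PTE
+ * branches differ only in 32-bit word order -- 57710 wants the low word
+ * first, 5706/5708/5709 want the high word first. A standalone model of
+ * one PTE write (hypothetical helper name):
+ */
+#if 0   /* example only */
+#include <stdint.h>
+
+static uint32_t *write_pte(uint32_t *ptbl, uint64_t page, int lo_word_first)
+{
+        uint32_t lo = (uint32_t) page;
+        uint32_t hi = (uint32_t) (page >> 32);
+
+        *ptbl++ = lo_word_first ? lo : hi;
+        *ptbl++ = lo_word_first ? hi : lo;
+        return ptbl;
+}
+#endif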
+
+
+/**
+ * bnx2i_alloc_qp_resc - allocates required resources for QP.
+ * @hba:       adapter structure pointer
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * Allocates QP (transport layer for iSCSI connection) resources: DMA'able
+ *     memory for the SQ/RQ/CQ and their page tables. EP structure elements
+ *     such as producer/consumer indexes/pointers, queue sizes and page
+ *     table contents are set up.
+ */
+int bnx2i_alloc_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       struct bnx2i_5771x_cq_db *cq_db;
+
+       ep->hba = hba;
+       ep->conn = NULL;
+       ep->ep_cid = ep->ep_iscsi_cid = ep->ep_pg_cid = 0;
+
+       /* Allocate page table memory for SQ which is page aligned */
+       ep->qp.sq_mem_size = hba->max_sqes * BNX2I_SQ_WQE_SIZE;
+       ep->qp.sq_mem_size =
+               (ep->qp.sq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       ep->qp.sq_pgtbl_size =
+               (ep->qp.sq_mem_size / PAGE_SIZE) * sizeof(void *);
+       ep->qp.sq_pgtbl_size =
+               (ep->qp.sq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       ep->qp.sq_pgtbl_virt =
+               dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+                                  &ep->qp.sq_pgtbl_phys, GFP_KERNEL);
+       if (!ep->qp.sq_pgtbl_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc SQ PT mem (%d)\n",
+                                 ep->qp.sq_pgtbl_size);
+               goto mem_alloc_err;
+       }
+
+       /* Allocate memory area for actual SQ element */
+       ep->qp.sq_virt =
+               dma_alloc_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+                                  &ep->qp.sq_phys, GFP_KERNEL);
+       if (!ep->qp.sq_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc SQ BD memory %d\n",
+                                 ep->qp.sq_mem_size);
+               goto mem_alloc_err;
+       }
+
+       memset(ep->qp.sq_virt, 0x00, ep->qp.sq_mem_size);
+       ep->qp.sq_first_qe = ep->qp.sq_virt;
+       ep->qp.sq_prod_qe = ep->qp.sq_first_qe;
+       ep->qp.sq_cons_qe = ep->qp.sq_first_qe;
+       ep->qp.sq_last_qe = &ep->qp.sq_first_qe[hba->max_sqes - 1];
+       ep->qp.sq_prod_idx = 0;
+       ep->qp.sq_cons_idx = 0;
+       ep->qp.sqe_left = hba->max_sqes;
+
+       /* Allocate page table memory for CQ which is page aligned */
+       ep->qp.cq_mem_size = hba->max_cqes * BNX2I_CQE_SIZE;
+       ep->qp.cq_mem_size =
+               (ep->qp.cq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       ep->qp.cq_pgtbl_size =
+               (ep->qp.cq_mem_size / PAGE_SIZE) * sizeof(void *);
+       ep->qp.cq_pgtbl_size =
+               (ep->qp.cq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       ep->qp.cq_pgtbl_virt =
+               dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+                                  &ep->qp.cq_pgtbl_phys, GFP_KERNEL);
+       if (!ep->qp.cq_pgtbl_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc CQ PT memory %d\n",
+                                 ep->qp.cq_pgtbl_size);
+               goto mem_alloc_err;
+       }
+
+       /* Allocate memory area for actual CQ element */
+       ep->qp.cq_virt =
+               dma_alloc_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+                                  &ep->qp.cq_phys, GFP_KERNEL);
+       if (!ep->qp.cq_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc CQ BD memory %d\n",
+                                 ep->qp.cq_mem_size);
+               goto mem_alloc_err;
+       }
+       memset(ep->qp.cq_virt, 0x00, ep->qp.cq_mem_size);
+
+       ep->qp.cq_first_qe = ep->qp.cq_virt;
+       ep->qp.cq_prod_qe = ep->qp.cq_first_qe;
+       ep->qp.cq_cons_qe = ep->qp.cq_first_qe;
+       ep->qp.cq_last_qe = &ep->qp.cq_first_qe[hba->max_cqes - 1];
+       ep->qp.cq_prod_idx = 0;
+       ep->qp.cq_cons_idx = 0;
+       ep->qp.cqe_left = hba->max_cqes;
+       ep->qp.cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+       ep->qp.cqe_size = hba->max_cqes;
+
+       /* Invalidate all EQ CQE indices; required only for 57710 */
+       cq_db = (struct bnx2i_5771x_cq_db *) ep->qp.cq_pgtbl_virt;
+       memset(cq_db->sqn, 0xFF, sizeof(cq_db->sqn[0]) * BNX2X_MAX_CQS);
+
+       /* Allocate page table memory for RQ which is page aligned */
+       ep->qp.rq_mem_size = hba->max_rqes * BNX2I_RQ_WQE_SIZE;
+       ep->qp.rq_mem_size =
+               (ep->qp.rq_mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       ep->qp.rq_pgtbl_size =
+               (ep->qp.rq_mem_size / PAGE_SIZE) * sizeof(void *);
+       ep->qp.rq_pgtbl_size =
+               (ep->qp.rq_pgtbl_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       ep->qp.rq_pgtbl_virt =
+               dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+                                  &ep->qp.rq_pgtbl_phys, GFP_KERNEL);
+       if (!ep->qp.rq_pgtbl_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc RQ PT mem %d\n",
+                                 ep->qp.rq_pgtbl_size);
+               goto mem_alloc_err;
+       }
+
+       /* Allocate memory area for actual RQ element */
+       ep->qp.rq_virt =
+               dma_alloc_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+                                  &ep->qp.rq_phys, GFP_KERNEL);
+       if (!ep->qp.rq_virt) {
+               printk(KERN_ALERT "bnx2i: unable to alloc RQ BD memory %d\n",
+                                 ep->qp.rq_mem_size);
+               goto mem_alloc_err;
+       }
+
+       ep->qp.rq_first_qe = ep->qp.rq_virt;
+       ep->qp.rq_prod_qe = ep->qp.rq_first_qe;
+       ep->qp.rq_cons_qe = ep->qp.rq_first_qe;
+       ep->qp.rq_last_qe = &ep->qp.rq_first_qe[hba->max_rqes - 1];
+       ep->qp.rq_prod_idx = 0x8000;
+       ep->qp.rq_cons_idx = 0;
+       ep->qp.rqe_left = hba->max_rqes;
+
+       setup_qp_page_tables(ep);
+
+       return 0;
+
+mem_alloc_err:
+       bnx2i_free_qp_resc(hba, ep);
+       return -ENOMEM;
+}
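+
+/*
+ * Illustrative sketch, not part of the driver flow above: every size in
+ * this function is rounded up to a whole page with the same idiom,
+ * (size + PAGE_SIZE - 1) & PAGE_MASK. A standalone model assuming a
+ * 4 KiB page (hypothetical macro names):
+ */
+#if 0   /* example only */
+#define EX_PAGE_SIZE    4096UL
+#define EX_PAGE_MASK    (~(EX_PAGE_SIZE - 1))
+
+static unsigned long ex_page_align(unsigned long size)
+{
+        /* e.g. 5000 -> 8192, 4096 -> 4096 */
+        return (size + EX_PAGE_SIZE - 1) & EX_PAGE_MASK;
+}
+#endif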
+
+
+
+/**
+ * bnx2i_free_qp_resc - free memory resources held by QP
+ * @hba:       adapter structure pointer
+ * @ep:        endpoint (transport identifier) structure
+ *
+ * Free QP resources - SQ/RQ/CQ memory and page tables.
+ */
+void bnx2i_free_qp_resc(struct bnx2i_hba *hba, struct bnx2i_endpoint *ep)
+{
+       if (ep->qp.ctx_base) {
+               iounmap(ep->qp.ctx_base);
+               ep->qp.ctx_base = NULL;
+       }
+       /* Free SQ mem */
+       if (ep->qp.sq_pgtbl_virt) {
+               dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_pgtbl_size,
+                                 ep->qp.sq_pgtbl_virt, ep->qp.sq_pgtbl_phys);
+               ep->qp.sq_pgtbl_virt = NULL;
+               ep->qp.sq_pgtbl_phys = 0;
+       }
+       if (ep->qp.sq_virt) {
+               dma_free_coherent(&hba->pcidev->dev, ep->qp.sq_mem_size,
+                                 ep->qp.sq_virt, ep->qp.sq_phys);
+               ep->qp.sq_virt = NULL;
+               ep->qp.sq_phys = 0;
+       }
+
+       /* Free RQ mem */
+       if (ep->qp.rq_pgtbl_virt) {
+               dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_pgtbl_size,
+                                 ep->qp.rq_pgtbl_virt, ep->qp.rq_pgtbl_phys);
+               ep->qp.rq_pgtbl_virt = NULL;
+               ep->qp.rq_pgtbl_phys = 0;
+       }
+       if (ep->qp.rq_virt) {
+               dma_free_coherent(&hba->pcidev->dev, ep->qp.rq_mem_size,
+                                 ep->qp.rq_virt, ep->qp.rq_phys);
+               ep->qp.rq_virt = NULL;
+               ep->qp.rq_phys = 0;
+       }
+
+       /* Free CQ mem */
+       if (ep->qp.cq_pgtbl_virt) {
+               dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_pgtbl_size,
+                                 ep->qp.cq_pgtbl_virt, ep->qp.cq_pgtbl_phys);
+               ep->qp.cq_pgtbl_virt = NULL;
+               ep->qp.cq_pgtbl_phys = 0;
+       }
+       if (ep->qp.cq_virt) {
+               dma_free_coherent(&hba->pcidev->dev, ep->qp.cq_mem_size,
+                                 ep->qp.cq_virt, ep->qp.cq_phys);
+               ep->qp.cq_virt = NULL;
+               ep->qp.cq_phys = 0;
+       }
+}
+
+
+/**
+ * bnx2i_send_fw_iscsi_init_msg - initiates initial handshake with iscsi f/w
+ * @hba:       adapter structure pointer
+ *
+ * Sends down the iscsi_init KWQEs which initiate the initial handshake
+ *     with the f/w. This results in iSCSI support validation and on-chip
+ *     context manager initialization. Firmware completes this handshake
+ *     with a CQE carrying the result of iSCSI support validation.
+ *     Parameters carried by the iscsi_init request determine the number
+ *     of offloaded connections and the tolerance level for iscsi protocol
+ *     violations this hba/chip can support.
+ */
+int bnx2i_send_fw_iscsi_init_msg(struct bnx2i_hba *hba)
+{
+       struct kwqe *kwqe_arr[3];
+       struct iscsi_kwqe_init1 iscsi_init;
+       struct iscsi_kwqe_init2 iscsi_init2;
+       int rc = 0;
+       u64 mask64;
+
+       bnx2i_adjust_qp_size(hba);
+
+       iscsi_init.flags =
+               ISCSI_PAGE_SIZE_4K << ISCSI_KWQE_INIT1_PAGE_SIZE_SHIFT;
+       if (en_tcp_dack)
+               iscsi_init.flags |= ISCSI_KWQE_INIT1_DELAYED_ACK_ENABLE;
+       iscsi_init.reserved0 = 0;
+       iscsi_init.num_cqs = 1;
+       iscsi_init.hdr.op_code = ISCSI_KWQE_OPCODE_INIT1;
+       iscsi_init.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+
+       iscsi_init.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
+       iscsi_init.dummy_buffer_addr_hi =
+               (u32) ((u64) hba->dummy_buf_dma >> 32);
+
+       hba->ctx_ccell_tasks =
+                       ((hba->num_ccell & 0xFFFF) | (hba->max_sqes << 16));
+       iscsi_init.num_ccells_per_conn = hba->num_ccell;
+       iscsi_init.num_tasks_per_conn = hba->max_sqes;
+       iscsi_init.sq_wqes_per_page = PAGE_SIZE / BNX2I_SQ_WQE_SIZE;
+       iscsi_init.sq_num_wqes = hba->max_sqes;
+       iscsi_init.cq_log_wqes_per_page =
+               (u8) bnx2i_power_of2(PAGE_SIZE / BNX2I_CQE_SIZE);
+       iscsi_init.cq_num_wqes = hba->max_cqes;
+       iscsi_init.cq_num_pages = (hba->max_cqes * BNX2I_CQE_SIZE +
+                                  (PAGE_SIZE - 1)) / PAGE_SIZE;
+       iscsi_init.sq_num_pages = (hba->max_sqes * BNX2I_SQ_WQE_SIZE +
+                                  (PAGE_SIZE - 1)) / PAGE_SIZE;
+       iscsi_init.rq_buffer_size = BNX2I_RQ_WQE_SIZE;
+       iscsi_init.rq_num_wqes = hba->max_rqes;
+
+
+       iscsi_init2.hdr.op_code = ISCSI_KWQE_OPCODE_INIT2;
+       iscsi_init2.hdr.flags =
+               (ISCSI_KWQE_LAYER_CODE << ISCSI_KWQE_HEADER_LAYER_CODE_SHIFT);
+       iscsi_init2.max_cq_sqn = hba->max_cqes * 2 + 1;
+       mask64 = 0x0ULL;
+       mask64 |= (
+               /* CISCO MDS */
+               (1UL <<
+                 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV) |
+               /* HP MSA1510i */
+               (1UL <<
+                 ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN) |
+               /* EMC */
+               (1ULL << ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN));
+       if (error_mask1)
+               iscsi_init2.error_bit_map[0] = error_mask1;
+       else
+               iscsi_init2.error_bit_map[0] = (u32) mask64;
+
+       if (error_mask2)
+               iscsi_init2.error_bit_map[1] = error_mask2;
+       else
+               iscsi_init2.error_bit_map[1] = (u32) (mask64 >> 32);
+
+       iscsi_error_mask = mask64;
+
+       kwqe_arr[0] = (struct kwqe *) &iscsi_init;
+       kwqe_arr[1] = (struct kwqe *) &iscsi_init2;
+
+       if (hba->cnic && hba->cnic->submit_kwqes)
+               rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, 2);
+       return rc;
+}
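+
+/*
+ * Illustrative sketch, not part of the driver flow above: error_bit_map[]
+ * carries a 64-bit "treat as warning" mask split across two 32-bit words.
+ * A standalone model with hypothetical bit positions:
+ */
+#if 0   /* example only */
+#include <stdint.h>
+
+static void build_error_map(uint32_t map[2])
+{
+        uint64_t mask64 = 0;
+
+        mask64 |= 1ULL << 12;   /* hypothetical status code 12 */
+        mask64 |= 1ULL << 37;   /* hypothetical status code 37 */
+
+        map[0] = (uint32_t) mask64;             /* low word  */
+        map[1] = (uint32_t) (mask64 >> 32);     /* high word */
+}
+#endif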
+
+
+/**
+ * bnx2i_process_scsi_cmd_resp - this function handles scsi cmd completion
+ * @session:   iscsi session pointer
+ * @bnx2i_conn:        iscsi connection pointer
+ * @cqe:       pointer to newly DMA'ed CQE entry for processing
+ *
+ * process SCSI CMD Response CQE & complete the request to SCSI-ML
+ */
+static int bnx2i_process_scsi_cmd_resp(struct iscsi_session *session,
+                                      struct bnx2i_conn *bnx2i_conn,
+                                      struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct bnx2i_cmd_response *resp_cqe;
+       struct bnx2i_cmd *bnx2i_cmd;
+       struct iscsi_task *task;
+       struct iscsi_cmd_rsp *hdr;
+       u32 datalen = 0;
+
+       resp_cqe = (struct bnx2i_cmd_response *)cqe;
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(conn,
+                                resp_cqe->itt & ISCSI_CMD_RESPONSE_INDEX);
+       if (!task)
+               goto fail;
+
+       bnx2i_cmd = task->dd_data;
+
+       if (bnx2i_cmd->req.op_attr & ISCSI_CMD_REQUEST_READ) {
+               conn->datain_pdus_cnt +=
+                       resp_cqe->task_stat.read_stat.num_data_outs;
+               conn->rxdata_octets +=
+                       bnx2i_cmd->req.total_data_transfer_length;
+       } else {
+               conn->dataout_pdus_cnt +=
+                       resp_cqe->task_stat.read_stat.num_data_outs;
+               conn->r2t_pdus_cnt +=
+                       resp_cqe->task_stat.read_stat.num_r2ts;
+               conn->txdata_octets +=
+                       bnx2i_cmd->req.total_data_transfer_length;
+       }
+       bnx2i_iscsi_unmap_sg_list(bnx2i_cmd);
+
+       hdr = (struct iscsi_cmd_rsp *)task->hdr;
+       hdr->opcode = resp_cqe->op_code;
+       hdr->max_cmdsn = cpu_to_be32(resp_cqe->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(resp_cqe->exp_cmd_sn);
+       hdr->response = resp_cqe->response;
+       hdr->cmd_status = resp_cqe->status;
+       hdr->flags = resp_cqe->response_flags;
+       hdr->residual_count = cpu_to_be32(resp_cqe->residual_count);
+
+       if (resp_cqe->op_code == ISCSI_OP_SCSI_DATA_IN)
+               goto done;
+
+       if (resp_cqe->status == SAM_STAT_CHECK_CONDITION) {
+               datalen = resp_cqe->data_length;
+               if (datalen < 2)
+                       goto done;
+
+               if (datalen > BNX2I_RQ_WQE_SIZE) {
+                       iscsi_conn_printk(KERN_ERR, conn,
+                                         "sense data len %d > RQ sz\n",
+                                         datalen);
+                       datalen = BNX2I_RQ_WQE_SIZE;
+               } else if (datalen > ISCSI_DEF_MAX_RECV_SEG_LEN) {
+                       iscsi_conn_printk(KERN_ERR, conn,
+                                         "sense data len %d > conn data\n",
+                                         datalen);
+                       datalen = ISCSI_DEF_MAX_RECV_SEG_LEN;
+               }
+
+               bnx2i_get_rq_buf(bnx2i_cmd->conn, conn->data, datalen);
+               bnx2i_put_rq_buf(bnx2i_cmd->conn, 1);
+       }
+
+done:
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr,
+                            conn->data, datalen);
+fail:
+       spin_unlock(&session->lock);
+       return 0;
+}
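+
+/*
+ * Illustrative sketch, not part of the driver flow above: the sense-data
+ * path clamps the CQE-reported length first to the RQ buffer size and
+ * then to the connection's receive segment limit. A standalone model:
+ */
+#if 0   /* example only */
+static unsigned int clamp_sense_len(unsigned int datalen,
+                                    unsigned int rq_wqe_size,
+                                    unsigned int max_recv_seg)
+{
+        if (datalen > rq_wqe_size)
+                datalen = rq_wqe_size;
+        else if (datalen > max_recv_seg)
+                datalen = max_recv_seg;
+        return datalen;
+}
+#endif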
+
+
+/**
+ * bnx2i_process_login_resp - this function handles iscsi login response
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process Login Response CQE & complete it to open-iscsi user daemon
+ */
+static int bnx2i_process_login_resp(struct iscsi_session *session,
+                                   struct bnx2i_conn *bnx2i_conn,
+                                   struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_task *task;
+       struct bnx2i_login_response *login;
+       struct iscsi_login_rsp *resp_hdr;
+       int pld_len;
+       int pad_len;
+
+       login = (struct bnx2i_login_response *) cqe;
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(conn,
+                                login->itt & ISCSI_LOGIN_RESPONSE_INDEX);
+       if (!task)
+               goto done;
+
+       resp_hdr = (struct iscsi_login_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = login->op_code;
+       resp_hdr->flags = login->response_flags;
+       resp_hdr->max_version = login->version_max;
+       resp_hdr->active_version = login->version_active;
+       resp_hdr->hlength = 0;
+
+       hton24(resp_hdr->dlength, login->data_length);
+       memcpy(resp_hdr->isid, &login->isid_lo, 6);
+       resp_hdr->tsih = cpu_to_be16(login->tsih);
+       resp_hdr->itt = task->hdr->itt;
+       resp_hdr->statsn = cpu_to_be32(login->stat_sn);
+       resp_hdr->exp_cmdsn = cpu_to_be32(login->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(login->max_cmd_sn);
+       resp_hdr->status_class = login->status_class;
+       resp_hdr->status_detail = login->status_detail;
+       pld_len = login->data_length;
+       bnx2i_conn->gen_pdu.resp_wr_ptr =
+                                       bnx2i_conn->gen_pdu.resp_buf + pld_len;
+
+       pad_len = 0;
+       if (pld_len & 0x3)
+               pad_len = 4 - (pld_len % 4);
+
+       if (pad_len) {
+               /* zero-fill the pad bytes and advance the write pointer */
+               memset(bnx2i_conn->gen_pdu.resp_wr_ptr, 0, pad_len);
+               bnx2i_conn->gen_pdu.resp_wr_ptr += pad_len;
+       }
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr,
+               bnx2i_conn->gen_pdu.resp_buf,
+               bnx2i_conn->gen_pdu.resp_wr_ptr - bnx2i_conn->gen_pdu.resp_buf);
+done:
+       spin_unlock(&session->lock);
+       return 0;
+}
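+
+/*
+ * Illustrative sketch, not part of the driver flow above: iSCSI pads PDU
+ * payloads to a 4-byte boundary, which is what the padding code in this
+ * function implements. A standalone model of the pad computation:
+ */
+#if 0   /* example only */
+static int pdu_pad_len(int pld_len)
+{
+        return (4 - (pld_len & 3)) & 3;   /* 0..3 pad bytes; e.g. 5 -> 3 */
+}
+#endif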
+
+/**
+ * bnx2i_process_tmf_resp - this function handles iscsi TMF response
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI TMF Response CQE and wake up the driver eh thread.
+ */
+static int bnx2i_process_tmf_resp(struct iscsi_session *session,
+                                 struct bnx2i_conn *bnx2i_conn,
+                                 struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_task *task;
+       struct bnx2i_tmf_response *tmf_cqe;
+       struct iscsi_tm_rsp *resp_hdr;
+
+       tmf_cqe = (struct bnx2i_tmf_response *)cqe;
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(conn,
+                                tmf_cqe->itt & ISCSI_TMF_RESPONSE_INDEX);
+       if (!task)
+               goto done;
+
+       resp_hdr = (struct iscsi_tm_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = tmf_cqe->op_code;
+       resp_hdr->max_cmdsn = cpu_to_be32(tmf_cqe->max_cmd_sn);
+       resp_hdr->exp_cmdsn = cpu_to_be32(tmf_cqe->exp_cmd_sn);
+       resp_hdr->itt = task->hdr->itt;
+       resp_hdr->response = tmf_cqe->response;
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+       spin_unlock(&session->lock);
+       return 0;
+}
+
+/**
+ * bnx2i_process_logout_resp - this function handles iscsi logout response
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI Logout Response CQE & make function call to
+ * notify the user daemon.
+ */
+static int bnx2i_process_logout_resp(struct iscsi_session *session,
+                                    struct bnx2i_conn *bnx2i_conn,
+                                    struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_task *task;
+       struct bnx2i_logout_response *logout;
+       struct iscsi_logout_rsp *resp_hdr;
+
+       logout = (struct bnx2i_logout_response *) cqe;
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(conn,
+                                logout->itt & ISCSI_LOGOUT_RESPONSE_INDEX);
+       if (!task)
+               goto done;
+
+       resp_hdr = (struct iscsi_logout_rsp *) &bnx2i_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = logout->op_code;
+       resp_hdr->flags = logout->response;
+       resp_hdr->hlength = 0;
+
+       resp_hdr->itt = task->hdr->itt;
+       resp_hdr->statsn = task->hdr->exp_statsn;
+       resp_hdr->exp_cmdsn = cpu_to_be32(logout->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(logout->max_cmd_sn);
+
+       resp_hdr->t2wait = cpu_to_be32(logout->time_to_wait);
+       resp_hdr->t2retain = cpu_to_be32(logout->time_to_retain);
+
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)resp_hdr, NULL, 0);
+done:
+       spin_unlock(&session->lock);
+       return 0;
+}
+
+/**
+ * bnx2i_process_nopin_local_cmpl - this function handles iscsi nopin CQE
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI NOPIN local completion CQE, frees ITT and command structures
+ */
+static void bnx2i_process_nopin_local_cmpl(struct iscsi_session *session,
+                                          struct bnx2i_conn *bnx2i_conn,
+                                          struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct bnx2i_nop_in_msg *nop_in;
+       struct iscsi_task *task;
+
+       nop_in = (struct bnx2i_nop_in_msg *)cqe;
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(conn,
+                                nop_in->itt & ISCSI_NOP_IN_MSG_INDEX);
+       if (task)
+               iscsi_put_task(task);
+       spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_unsol_pdu_adjust_rq - makes adjustments to RQ after unsol pdu is recvd
+ * @bnx2i_conn:        iscsi connection pointer
+ *
+ * Firmware advances RQ producer index for every unsolicited PDU even if
+ *     payload data length is '0'. This function makes corresponding
+ *     adjustments on the driver side to match this f/w behavior
+ */
+static void bnx2i_unsol_pdu_adjust_rq(struct bnx2i_conn *bnx2i_conn)
+{
+       char dummy_rq_data[2];
+       bnx2i_get_rq_buf(bnx2i_conn, dummy_rq_data, 1);
+       bnx2i_put_rq_buf(bnx2i_conn, 1);
+}
+
+
+/**
+ * bnx2i_process_nopin_mesg - this function handles iscsi nopin CQE
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI target's proactive iSCSI NOPIN request
+ */
+static int bnx2i_process_nopin_mesg(struct iscsi_session *session,
+                                    struct bnx2i_conn *bnx2i_conn,
+                                    struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_task *task;
+       struct bnx2i_nop_in_msg *nop_in;
+       struct iscsi_nopin *hdr;
+       u32 itt;
+       int tgt_async_nop = 0;
+
+       nop_in = (struct bnx2i_nop_in_msg *)cqe;
+       itt = nop_in->itt & ISCSI_NOP_IN_MSG_INDEX;
+
+       spin_lock(&session->lock);
+       hdr = (struct iscsi_nopin *)&bnx2i_conn->gen_pdu.resp_hdr;
+       memset(hdr, 0, sizeof(struct iscsi_hdr));
+       hdr->opcode = nop_in->op_code;
+       hdr->max_cmdsn = cpu_to_be32(nop_in->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(nop_in->exp_cmd_sn);
+       hdr->ttt = cpu_to_be32(nop_in->ttt);
+
+       if (itt == (u16) RESERVED_ITT) {
+               bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+               hdr->itt = RESERVED_ITT;
+               tgt_async_nop = 1;
+               goto done;
+       }
+
+       /* this is a response to one of our nop-outs */
+       task = iscsi_itt_to_task(conn, itt);
+       if (task) {
+               hdr->flags = ISCSI_FLAG_CMD_FINAL;
+               hdr->itt = task->hdr->itt;
+               hdr->ttt = cpu_to_be32(nop_in->ttt);
+               memcpy(hdr->lun, nop_in->lun, 8);
+       }
+done:
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, NULL, 0);
+       spin_unlock(&session->lock);
+
+       return tgt_async_nop;
+}
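+
+/*
+ * Illustrative sketch, not part of the driver flow above: the ITT check
+ * separates target-initiated NOP-Ins (reserved ITT; the RQ must be
+ * adjusted) from replies to our own NOP-Outs (valid ITT). A standalone
+ * model (the reserved-ITT value here is an assumption):
+ */
+#if 0   /* example only */
+#include <stdint.h>
+
+#define EX_RESERVED_ITT 0xffffU   /* assumed 16-bit reserved ITT */
+
+static int is_unsolicited_nopin(uint16_t itt)
+{
+        return itt == EX_RESERVED_ITT;   /* 1: target-initiated ping */
+}
+#endif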
+
+
+/**
+ * bnx2i_process_async_mesg - this function handles iscsi async message
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI ASYNC Message
+ */
+static void bnx2i_process_async_mesg(struct iscsi_session *session,
+                                    struct bnx2i_conn *bnx2i_conn,
+                                    struct cqe *cqe)
+{
+       struct bnx2i_async_msg *async_cqe;
+       struct iscsi_async *resp_hdr;
+       u8 async_event;
+
+       bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+       async_cqe = (struct bnx2i_async_msg *)cqe;
+       async_event = async_cqe->async_event;
+
+       if (async_event == ISCSI_ASYNC_MSG_SCSI_EVENT) {
+               iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+                                 "async: scsi events not supported\n");
+               return;
+       }
+
+       spin_lock(&session->lock);
+       resp_hdr = (struct iscsi_async *) &bnx2i_conn->gen_pdu.resp_hdr;
+       memset(resp_hdr, 0, sizeof(struct iscsi_hdr));
+       resp_hdr->opcode = async_cqe->op_code;
+       resp_hdr->flags = 0x80;
+
+       memcpy(resp_hdr->lun, async_cqe->lun, 8);
+       resp_hdr->exp_cmdsn = cpu_to_be32(async_cqe->exp_cmd_sn);
+       resp_hdr->max_cmdsn = cpu_to_be32(async_cqe->max_cmd_sn);
+
+       resp_hdr->async_event = async_cqe->async_event;
+       resp_hdr->async_vcode = async_cqe->async_vcode;
+
+       resp_hdr->param1 = cpu_to_be16(async_cqe->param1);
+       resp_hdr->param2 = cpu_to_be16(async_cqe->param2);
+       resp_hdr->param3 = cpu_to_be16(async_cqe->param3);
+
+       __iscsi_complete_pdu(bnx2i_conn->cls_conn->dd_data,
+                            (struct iscsi_hdr *)resp_hdr, NULL, 0);
+       spin_unlock(&session->lock);
+}
+
+
+/**
+ * bnx2i_process_reject_mesg - process iscsi reject pdu
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process iSCSI REJECT message
+ */
+static void bnx2i_process_reject_mesg(struct iscsi_session *session,
+                                     struct bnx2i_conn *bnx2i_conn,
+                                     struct cqe *cqe)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct bnx2i_reject_msg *reject;
+       struct iscsi_reject *hdr;
+
+       reject = (struct bnx2i_reject_msg *) cqe;
+       if (reject->data_length) {
+               bnx2i_get_rq_buf(bnx2i_conn, conn->data, reject->data_length);
+               bnx2i_put_rq_buf(bnx2i_conn, 1);
+       } else
+               bnx2i_unsol_pdu_adjust_rq(bnx2i_conn);
+
+       spin_lock(&session->lock);
+       hdr = (struct iscsi_reject *) &bnx2i_conn->gen_pdu.resp_hdr;
+       memset(hdr, 0, sizeof(struct iscsi_hdr));
+       hdr->opcode = reject->op_code;
+       hdr->reason = reject->reason;
+       hton24(hdr->dlength, reject->data_length);
+       hdr->max_cmdsn = cpu_to_be32(reject->max_cmd_sn);
+       hdr->exp_cmdsn = cpu_to_be32(reject->exp_cmd_sn);
+       hdr->ffffffff = cpu_to_be32(RESERVED_ITT);
+       __iscsi_complete_pdu(conn, (struct iscsi_hdr *)hdr, conn->data,
+                            reject->data_length);
+       spin_unlock(&session->lock);
+}
+
+/**
+ * bnx2i_process_cmd_cleanup_resp - process scsi command clean-up completion
+ * @session:           iscsi session pointer
+ * @bnx2i_conn:                iscsi connection pointer
+ * @cqe:               pointer to newly DMA'ed CQE entry for processing
+ *
+ * process command cleanup response CQE during conn shutdown or error recovery
+ */
+static void bnx2i_process_cmd_cleanup_resp(struct iscsi_session *session,
+                                          struct bnx2i_conn *bnx2i_conn,
+                                          struct cqe *cqe)
+{
+       struct bnx2i_cleanup_response *cmd_clean_rsp;
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_task *task;
+
+       cmd_clean_rsp = (struct bnx2i_cleanup_response *)cqe;
+       spin_lock(&session->lock);
+       task = iscsi_itt_to_task(conn,
+                       cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+       if (!task)
+               printk(KERN_ALERT "bnx2i: cmd clean ITT %x not active\n",
+                       cmd_clean_rsp->itt & ISCSI_CLEANUP_RESPONSE_INDEX);
+       spin_unlock(&session->lock);
+       complete(&bnx2i_conn->cmd_cleanup_cmpl);
+}
+
+
+
+/**
+ * bnx2i_process_new_cqes - process newly DMA'ed CQE's
+ * @bnx2i_conn:                iscsi connection
+ *
+ * this function is called by generic KCQ handler to process all pending CQE's
+ */
+static void bnx2i_process_new_cqes(struct bnx2i_conn *bnx2i_conn)
+{
+       struct iscsi_conn *conn = bnx2i_conn->cls_conn->dd_data;
+       struct iscsi_session *session = conn->session;
+       struct qp_info *qp = &bnx2i_conn->ep->qp;
+       struct bnx2i_nop_in_msg *nopin;
+       int tgt_async_msg;
+
+       while (1) {
+               nopin = (struct bnx2i_nop_in_msg *) qp->cq_cons_qe;
+               if (nopin->cq_req_sn != qp->cqe_exp_seq_sn)
+                       break;
+
+               if (unlikely(test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx)))
+                       break;
+
+               tgt_async_msg = 0;
+
+               switch (nopin->op_code) {
+               case ISCSI_OP_SCSI_CMD_RSP:
+               case ISCSI_OP_SCSI_DATA_IN:
+                       bnx2i_process_scsi_cmd_resp(session, bnx2i_conn,
+                                                   qp->cq_cons_qe);
+                       break;
+               case ISCSI_OP_LOGIN_RSP:
+                       bnx2i_process_login_resp(session, bnx2i_conn,
+                                                qp->cq_cons_qe);
+                       break;
+               case ISCSI_OP_SCSI_TMFUNC_RSP:
+                       bnx2i_process_tmf_resp(session, bnx2i_conn,
+                                              qp->cq_cons_qe);
+                       break;
+               case ISCSI_OP_LOGOUT_RSP:
+                       bnx2i_process_logout_resp(session, bnx2i_conn,
+                                                 qp->cq_cons_qe);
+                       break;
+               case ISCSI_OP_NOOP_IN:
+                       if (bnx2i_process_nopin_mesg(session, bnx2i_conn,
+                                                    qp->cq_cons_qe))
+                               tgt_async_msg = 1;
+                       break;
+               case ISCSI_OPCODE_NOPOUT_LOCAL_COMPLETION:
+                       bnx2i_process_nopin_local_cmpl(session, bnx2i_conn,
+                                                      qp->cq_cons_qe);
+                       break;
+               case ISCSI_OP_ASYNC_EVENT:
+                       bnx2i_process_async_mesg(session, bnx2i_conn,
+                                                qp->cq_cons_qe);
+                       tgt_async_msg = 1;
+                       break;
+               case ISCSI_OP_REJECT:
+                       bnx2i_process_reject_mesg(session, bnx2i_conn,
+                                                 qp->cq_cons_qe);
+                       break;
+               case ISCSI_OPCODE_CLEANUP_RESPONSE:
+                       bnx2i_process_cmd_cleanup_resp(session, bnx2i_conn,
+                                                      qp->cq_cons_qe);
+                       break;
+               default:
+                       printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+                                         nopin->op_code);
+               }
+
+               if (!tgt_async_msg)
+                       bnx2i_conn->ep->num_active_cmds--;
+
+               /* Clear out the opcode in production versions only; until
+                * beta, keep the field intact to help with debugging
+                * (context dump):  nopin->op_code = 0;
+                */
+               qp->cqe_exp_seq_sn++;
+               if (qp->cqe_exp_seq_sn == (qp->cqe_size * 2 + 1))
+                       qp->cqe_exp_seq_sn = ISCSI_INITIAL_SN;
+
+               if (qp->cq_cons_qe == qp->cq_last_qe) {
+                       qp->cq_cons_qe = qp->cq_first_qe;
+                       qp->cq_cons_idx = 0;
+               } else {
+                       qp->cq_cons_qe++;
+                       qp->cq_cons_idx++;
+               }
+       }
+       bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+}
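+
+/*
+ * Illustrative sketch, not part of the driver flow above: the loop in
+ * this function is a ring consumer -- a CQE is valid only while its
+ * sequence number matches the expected one, and both the sequence number
+ * and the consumer index wrap. A standalone model of the wrap arithmetic
+ * (an initial SN of 1 is an assumption):
+ */
+#if 0   /* example only */
+static void advance_cq(unsigned int *exp_sn, unsigned int *cons_idx,
+                       unsigned int cq_size)
+{
+        if (++(*exp_sn) == (cq_size * 2 + 1))   /* sequence-number wrap */
+                *exp_sn = 1;
+        if (++(*cons_idx) == cq_size)           /* consumer-index wrap */
+                *cons_idx = 0;
+}
+#endif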
+
+/**
+ * bnx2i_fastpath_notification - process global event queue (KCQ)
+ * @hba:               adapter structure pointer
+ * @new_cqe_kcqe:      pointer to newly DMA'ed KCQE entry
+ *
+ * Fast path event notification handler, KCQ entry carries context id
+ *     of the connection that has 1 or more pending CQ entries
+ */
+static void bnx2i_fastpath_notification(struct bnx2i_hba *hba,
+                                       struct iscsi_kcqe *new_cqe_kcqe)
+{
+       struct bnx2i_conn *conn;
+       u32 iscsi_cid;
+
+       iscsi_cid = new_cqe_kcqe->iscsi_conn_id;
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "cid #%x not valid\n", iscsi_cid);
+               return;
+       }
+       if (!conn->ep) {
+               printk(KERN_ALERT "cid #%x - ep not bound\n", iscsi_cid);
+               return;
+       }
+
+       bnx2i_process_new_cqes(conn);
+}
+
+
+/**
+ * bnx2i_process_update_conn_cmpl - process iscsi conn update completion KCQE
+ * @hba:               adapter structure pointer
+ * @update_kcqe:       kcqe pointer
+ *
+ * CONN_UPDATE completion handler, this completes iSCSI connection FFP migration
+ */
+static void bnx2i_process_update_conn_cmpl(struct bnx2i_hba *hba,
+                                          struct iscsi_kcqe *update_kcqe)
+{
+       struct bnx2i_conn *conn;
+       u32 iscsi_cid;
+
+       iscsi_cid = update_kcqe->iscsi_conn_id;
+       conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!conn) {
+               printk(KERN_ALERT "conn_update: cid %x not valid\n", iscsi_cid);
+               return;
+       }
+       if (!conn->ep) {
+               printk(KERN_ALERT "cid %x does not have ep bound\n", iscsi_cid);
+               return;
+       }
+
+       if (update_kcqe->completion_status) {
+               printk(KERN_ALERT "request failed cid %x\n", iscsi_cid);
+               conn->ep->state = EP_STATE_ULP_UPDATE_FAILED;
+       } else
+               conn->ep->state = EP_STATE_ULP_UPDATE_COMPL;
+
+       wake_up_interruptible(&conn->ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_recovery_que_add_conn - add connection to recovery queue
+ * @hba:               adapter structure pointer
+ * @bnx2i_conn:                iscsi connection
+ *
+ * Add connection to recovery queue and schedule adapter eh worker
+ */
+static void bnx2i_recovery_que_add_conn(struct bnx2i_hba *hba,
+                                       struct bnx2i_conn *bnx2i_conn)
+{
+       iscsi_conn_failure(bnx2i_conn->cls_conn->dd_data,
+                          ISCSI_ERR_CONN_FAILED);
+}
+
+
+/**
+ * bnx2i_process_tcp_error - process error notification on a given connection
+ * @hba:               adapter structure pointer
+ * @tcp_err:           tcp error kcqe pointer
+ *
+ * handles tcp level error notifications from FW.
+ */
+static void bnx2i_process_tcp_error(struct bnx2i_hba *hba,
+                                   struct iscsi_kcqe *tcp_err)
+{
+       struct bnx2i_conn *bnx2i_conn;
+       u32 iscsi_cid;
+
+       iscsi_cid = tcp_err->iscsi_conn_id;
+       bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+
+       if (!bnx2i_conn) {
+               printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+               return;
+       }
+
+       printk(KERN_ALERT "bnx2i - cid 0x%x had TCP errors, error code 0x%x\n",
+                         iscsi_cid, tcp_err->completion_status);
+       bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+}
+
+
+/**
+ * bnx2i_process_iscsi_error - process error notification on a given connection
+ * @hba:               adapter structure pointer
+ * @iscsi_err:         iscsi error kcqe pointer
+ *
+ * handles iscsi error notifications from the FW. Based on the initial
+ *     handshake, firmware classifies an iscsi protocol / TCP RFC violation
+ *     as either a warning or an error indication. If the indication is of
+ *     "Error" type, the driver will initiate session recovery for that
+ *     connection/session. For a "Warning" type indication, the driver will
+ *     put out a system log message (only one message per violation type
+ *     for the life of the session, to avoid unnecessarily overloading the
+ *     system)
+ */
+static void bnx2i_process_iscsi_error(struct bnx2i_hba *hba,
+                                     struct iscsi_kcqe *iscsi_err)
+{
+       struct bnx2i_conn *bnx2i_conn;
+       u32 iscsi_cid;
+       char warn_notice[] = "iscsi_warning";
+       char error_notice[] = "iscsi_error";
+       char additional_notice[64];
+       char *message;
+       int need_recovery;
+       u64 err_mask64;
+
+       iscsi_cid = iscsi_err->iscsi_conn_id;
+       bnx2i_conn = bnx2i_get_conn_from_id(hba, iscsi_cid);
+       if (!bnx2i_conn) {
+               printk(KERN_ALERT "bnx2i - cid 0x%x not valid\n", iscsi_cid);
+               return;
+       }
+
+       err_mask64 = (0x1ULL << iscsi_err->completion_status);
+
+       if (err_mask64 & iscsi_error_mask) {
+               need_recovery = 0;
+               message = warn_notice;
+       } else {
+               need_recovery = 1;
+               message = error_notice;
+       }
+
+       switch (iscsi_err->completion_status) {
+       case ISCSI_KCQE_COMPLETION_STATUS_HDR_DIG_ERR:
+               strcpy(additional_notice, "hdr digest err");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_DATA_DIG_ERR:
+               strcpy(additional_notice, "data digest err");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_OPCODE:
+               strcpy(additional_notice, "wrong opcode rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_AHS_LEN:
+               strcpy(additional_notice, "AHS len > 0 rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ITT:
+               strcpy(additional_notice, "invalid ITT rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_STATSN:
+               strcpy(additional_notice, "wrong StatSN rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_EXP_DATASN:
+               strcpy(additional_notice, "wrong DataSN rcvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T:
+               strcpy(additional_notice, "pend R2T violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_0:
+               strcpy(additional_notice, "ERL0, UO");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_1:
+               strcpy(additional_notice, "ERL0, U1");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_2:
+               strcpy(additional_notice, "ERL0, U2");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_3:
+               strcpy(additional_notice, "ERL0, U3");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_4:
+               strcpy(additional_notice, "ERL0, U4");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_5:
+               strcpy(additional_notice, "ERL0, U5");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_O_U_6:
+               strcpy(additional_notice, "ERL0, U6");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_RCV_LEN:
+               strcpy(additional_notice, "invalid resi len");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_RCV_PDU_LEN:
+               strcpy(additional_notice, "MRDSL violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_F_BIT_ZERO:
+               strcpy(additional_notice, "F-bit not set");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_NOT_RSRV:
+               strcpy(additional_notice, "invalid TTT");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATASN:
+               strcpy(additional_notice, "invalid DataSN");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REMAIN_BURST_LEN:
+               strcpy(additional_notice, "burst len violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_BUFFER_OFF:
+               strcpy(additional_notice, "buf offset violation");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_LUN:
+               strcpy(additional_notice, "invalid LUN field");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_R2TSN:
+               strcpy(additional_notice, "invalid R2TSN field");
+               break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0      \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_0
+       case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_0:
+               strcpy(additional_notice, "invalid cmd len1");
+               break;
+#define BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1      \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DESIRED_DATA_TRNS_LEN_1
+       case BNX2I_ERR_DESIRED_DATA_TRNS_LEN_1:
+               strcpy(additional_notice, "invalid cmd len2");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_EXCEED:
+               strcpy(additional_notice,
+                      "pend r2t exceeds MaxOutstandingR2T value");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_TTT_IS_RSRV:
+               strcpy(additional_notice, "TTT is rsvd");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_MAX_BURST_LEN:
+               strcpy(additional_notice, "MBL violation");
+               break;
+#define BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO        \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_DATA_SEG_LEN_NOT_ZERO
+       case BNX2I_ERR_DATA_SEG_LEN_NOT_ZERO:
+               strcpy(additional_notice, "data seg len != 0");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_REJECT_PDU_LEN:
+               strcpy(additional_notice, "reject pdu len error");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_ASYNC_PDU_LEN:
+               strcpy(additional_notice, "async pdu len error");
+               break;
+       case ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_NOPIN_PDU_LEN:
+               strcpy(additional_notice, "nopin pdu len error");
+               break;
+#define BNX2_ERR_PEND_R2T_IN_CLEANUP                   \
+       ISCSI_KCQE_COMPLETION_STATUS_PROTOCOL_ERR_PEND_R2T_IN_CLEANUP
+       case BNX2_ERR_PEND_R2T_IN_CLEANUP:
+               strcpy(additional_notice, "pend r2t in cleanup");
+               break;
+
+       case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_FRAGMENT:
+               strcpy(additional_notice, "IP fragments rcvd");
+               break;
+       case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_IP_OPTIONS:
+               strcpy(additional_notice, "IP options error");
+               break;
+       case ISCI_KCQE_COMPLETION_STATUS_TCP_ERROR_URGENT_FLAG:
+               strcpy(additional_notice, "urgent flag error");
+               break;
+       default:
+               printk(KERN_ALERT "iscsi_err - unknown err %x\n",
+                                 iscsi_err->completion_status);
+       }
+
+       if (need_recovery) {
+               iscsi_conn_printk(KERN_ALERT,
+                                 bnx2i_conn->cls_conn->dd_data,
+                                 "bnx2i: %s - %s\n",
+                                 message, additional_notice);
+
+               iscsi_conn_printk(KERN_ALERT,
+                                 bnx2i_conn->cls_conn->dd_data,
+                                 "conn_err - hostno %d conn %p, "
+                                 "iscsi_cid %x cid %x\n",
+                                 bnx2i_conn->hba->shost->host_no,
+                                 bnx2i_conn, bnx2i_conn->ep->ep_iscsi_cid,
+                                 bnx2i_conn->ep->ep_cid);
+               bnx2i_recovery_que_add_conn(bnx2i_conn->hba, bnx2i_conn);
+       } else
+               if (!test_and_set_bit(iscsi_err->completion_status,
+                                     (void *) &bnx2i_conn->violation_notified))
+                       iscsi_conn_printk(KERN_ALERT,
+                                         bnx2i_conn->cls_conn->dd_data,
+                                         "bnx2i: %s - %s\n",
+                                         message, additional_notice);
+}
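+
+/*
+ * Illustrative sketch, not part of the driver flow above: the
+ * warning/error split tests the completion status against the 64-bit
+ * mask negotiated at init time -- a set bit demotes the violation to a
+ * warning. A standalone model:
+ */
+#if 0   /* example only */
+#include <stdint.h>
+
+static int needs_recovery(unsigned int status, uint64_t warn_mask)
+{
+        if (status > 63)
+                return 1;                         /* out of mask range */
+        return !((1ULL << status) & warn_mask);   /* unmasked => recover */
+}
+#endif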
+
+
+/**
+ * bnx2i_process_conn_destroy_cmpl - process iscsi conn destroy completion
+ * @hba:               adapter structure pointer
+ * @conn_destroy:      conn destroy kcqe pointer
+ *
+ * handles connection destroy completion request.
+ */
+static void bnx2i_process_conn_destroy_cmpl(struct bnx2i_hba *hba,
+                                           struct iscsi_kcqe *conn_destroy)
+{
+       struct bnx2i_endpoint *ep;
+
+       ep = bnx2i_find_ep_in_destroy_list(hba, conn_destroy->iscsi_conn_id);
+       if (!ep) {
+               printk(KERN_ALERT "bnx2i_conn_destroy_cmpl: no pending "
+                                 "offload request, unexpected completion\n");
+               return;
+       }
+
+       if (hba != ep->hba) {
+               printk(KERN_ALERT "conn destroy- error hba mis-match\n");
+               return;
+       }
+
+       if (conn_destroy->completion_status) {
+               printk(KERN_ALERT "conn_destroy_cmpl: op failed\n");
+               ep->state = EP_STATE_CLEANUP_FAILED;
+       } else
+               ep->state = EP_STATE_CLEANUP_CMPL;
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_process_ofld_cmpl - process initial iscsi conn offload completion
+ * @hba:               adapter structure pointer
+ * @ofld_kcqe:         conn offload kcqe pointer
+ *
+ * handles initial connection offload completion, ep_connect() thread is
+ *     woken-up to continue with LLP connect process
+ */
+static void bnx2i_process_ofld_cmpl(struct bnx2i_hba *hba,
+                                   struct iscsi_kcqe *ofld_kcqe)
+{
+       u32 cid_addr;
+       struct bnx2i_endpoint *ep;
+       u32 cid_num;
+
+       ep = bnx2i_find_ep_in_ofld_list(hba, ofld_kcqe->iscsi_conn_id);
+       if (!ep) {
+               printk(KERN_ALERT "ofld_cmpl: no pend offload request\n");
+               return;
+       }
+
+       if (hba != ep->hba) {
+               printk(KERN_ALERT "ofld_cmpl: error hba mis-match\n");
+               return;
+       }
+
+       if (ofld_kcqe->completion_status) {
+               if (ofld_kcqe->completion_status ==
+                   ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE)
+                       printk(KERN_ALERT "bnx2i: unable to allocate"
+                                         " iSCSI context resources\n");
+               ep->state = EP_STATE_OFLD_FAILED;
+       } else {
+               ep->state = EP_STATE_OFLD_COMPL;
+               cid_addr = ofld_kcqe->iscsi_conn_context_id;
+               cid_num = bnx2i_get_cid_num(ep);
+               ep->ep_cid = cid_addr;
+               ep->qp.ctx_base = NULL;
+       }
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+/**
+ * bnx2i_indicate_kcqe - process pending KCQE entries
+ * @context:   adapter structure pointer
+ * @kcqe:      array of pending KCQE entries
+ * @num_cqe:   number of KCQE entries in the array
+ *
+ * Generic KCQ event handler/dispatcher
+ */
+static void bnx2i_indicate_kcqe(void *context, struct kcqe *kcqe[],
+                               u32 num_cqe)
+{
+       struct bnx2i_hba *hba = context;
+       int i = 0;
+       struct iscsi_kcqe *ikcqe = NULL;
+
+       while (i < num_cqe) {
+               ikcqe = (struct iscsi_kcqe *) kcqe[i++];
+
+               if (ikcqe->op_code ==
+                   ISCSI_KCQE_OPCODE_CQ_EVENT_NOTIFICATION)
+                       bnx2i_fastpath_notification(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_OFFLOAD_CONN)
+                       bnx2i_process_ofld_cmpl(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_UPDATE_CONN)
+                       bnx2i_process_update_conn_cmpl(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_INIT) {
+                       if (ikcqe->completion_status !=
+                           ISCSI_KCQE_COMPLETION_STATUS_SUCCESS)
+                               bnx2i_iscsi_license_error(hba,
+                                               ikcqe->completion_status);
+                       else {
+                               set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+                               bnx2i_get_link_state(hba);
+                               printk(KERN_INFO "bnx2i [%.2x:%.2x.%.2x]: "
+                                                "ISCSI_INIT passed\n",
+                                                (u8)hba->pcidev->bus->number,
+                                                hba->pci_devno,
+                                                (u8)hba->pci_func);
+                       }
+               } else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_DESTROY_CONN)
+                       bnx2i_process_conn_destroy_cmpl(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_ISCSI_ERROR)
+                       bnx2i_process_iscsi_error(hba, ikcqe);
+               else if (ikcqe->op_code == ISCSI_KCQE_OPCODE_TCP_ERROR)
+                       bnx2i_process_tcp_error(hba, ikcqe);
+               else
+                       printk(KERN_ALERT "bnx2i: unknown opcode 0x%x\n",
+                                         ikcqe->op_code);
+       }
+}
+
+
+/**
+ * bnx2i_indicate_netevent - Generic netdev event handler
+ * @context:   adapter structure pointer
+ * @event:     event type
+ *
+ * Handles four netdev events: NETDEV_UP, NETDEV_DOWN,
+ *     NETDEV_GOING_DOWN and NETDEV_CHANGE
+ */
+static void bnx2i_indicate_netevent(void *context, unsigned long event)
+{
+       struct bnx2i_hba *hba = context;
+
+       switch (event) {
+       case NETDEV_UP:
+               if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+                       bnx2i_send_fw_iscsi_init_msg(hba);
+               break;
+       case NETDEV_DOWN:
+               clear_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+               clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+               break;
+       case NETDEV_GOING_DOWN:
+               set_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state);
+               iscsi_host_for_each_session(hba->shost,
+                                           bnx2i_drop_session);
+               break;
+       case NETDEV_CHANGE:
+               bnx2i_get_link_state(hba);
+               break;
+       default:
+               ;
+       }
+}
+
+
+/**
+ * bnx2i_cm_connect_cmpl - process iscsi conn establishment completion
+ * @cm_sk:             cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate completion of option-2 TCP connect request.
+ */
+static void bnx2i_cm_connect_cmpl(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+               ep->state = EP_STATE_CONNECT_FAILED;
+       else if (test_bit(SK_F_OFFLD_COMPLETE, &cm_sk->flags))
+               ep->state = EP_STATE_CONNECT_COMPL;
+       else
+               ep->state = EP_STATE_CONNECT_FAILED;
+
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_close_cmpl - process tcp conn close completion
+ * @cm_sk:     cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate completion of option-2 graceful TCP connection shutdown
+ */
+static void bnx2i_cm_close_cmpl(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_DISCONN_COMPL;
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_abort_cmpl - process abortive tcp conn teardown completion
+ * @cm_sk:     cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to
+ *     indicate completion of option-2 abortive TCP connection termination
+ */
+static void bnx2i_cm_abort_cmpl(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_DISCONN_COMPL;
+       wake_up_interruptible(&ep->ofld_wait);
+}
+
+
+/**
+ * bnx2i_cm_remote_close - process received TCP FIN
+ * @cm_sk:     cnic sock structure pointer
+ *
+ * function callback exported via bnx2i - cnic driver interface to indicate
+ *     async TCP events such as FIN
+ */
+static void bnx2i_cm_remote_close(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_TCP_FIN_RCVD;
+       if (ep->conn)
+               bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+/**
+ * bnx2i_cm_remote_abort - process TCP RST and start conn cleanup
+ * @cm_sk:     cnic sock structure pointer
+ *
+ * function callback exported via the bnx2i - cnic driver interface to
+ *     indicate async TCP events (RST) sent by the peer.
+ */
+static void bnx2i_cm_remote_abort(struct cnic_sock *cm_sk)
+{
+       struct bnx2i_endpoint *ep = (struct bnx2i_endpoint *) cm_sk->context;
+
+       ep->state = EP_STATE_TCP_RST_RCVD;
+       if (ep->conn)
+               bnx2i_recovery_que_add_conn(ep->hba, ep->conn);
+}
+
+
+static void bnx2i_send_nl_mesg(struct cnic_dev *dev, u32 msg_type,
+                              char *buf, u16 buflen)
+{
+       struct bnx2i_hba *hba;
+
+       hba = bnx2i_find_hba_for_cnic(dev);
+       if (!hba)
+               return;
+
+       if (iscsi_offload_mesg(hba->shost, &bnx2i_iscsi_transport,
+                                  msg_type, buf, buflen))
+               printk(KERN_ALERT "bnx2i: private nl message send error\n");
+
+}
+
+
+/**
+ * bnx2i_cnic_cb - global instance of the bnx2i - cnic driver interface
+ *                     structure carrying callback function pointers
+ *
+ */
+struct cnic_ulp_ops bnx2i_cnic_cb = {
+       .cnic_init = bnx2i_ulp_init,
+       .cnic_exit = bnx2i_ulp_exit,
+       .cnic_start = bnx2i_start,
+       .cnic_stop = bnx2i_stop,
+       .indicate_kcqes = bnx2i_indicate_kcqe,
+       .indicate_netevent = bnx2i_indicate_netevent,
+       .cm_connect_complete = bnx2i_cm_connect_cmpl,
+       .cm_close_complete = bnx2i_cm_close_cmpl,
+       .cm_abort_complete = bnx2i_cm_abort_cmpl,
+       .cm_remote_close = bnx2i_cm_remote_close,
+       .cm_remote_abort = bnx2i_cm_remote_abort,
+       .iscsi_nl_send_msg = bnx2i_send_nl_mesg,
+       .owner = THIS_MODULE
+};
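
/* Illustrative sketch (not part of this patch): the cnic core is
 * expected to dispatch events through this ops table roughly as
 * follows; the helper name and calling context are assumptions.
 */
static void example_dispatch_netevent(struct cnic_ulp_ops *ops,
                                      void *ulp_ctx, unsigned long event)
{
        if (ops->indicate_netevent)
                ops->indicate_netevent(ulp_ctx, event);
}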
+
+
+/**
+ * bnx2i_map_ep_dbell_regs - map connection doorbell registers
+ * @ep: bnx2i endpoint
+ *
+ * maps the connection's SQ and RQ doorbell registers. 5706/5708/5709
+ *     devices host these registers in BAR #0, whereas on 57710 they are
+ *     accessed by mapping BAR #1
+ */
+int bnx2i_map_ep_dbell_regs(struct bnx2i_endpoint *ep)
+{
+       u32 cid_num;
+       u32 reg_off;
+       u32 first_l4l5;
+       u32 ctx_sz;
+       u32 config2;
+       resource_size_t reg_base;
+
+       cid_num = bnx2i_get_cid_num(ep);
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &ep->hba->cnic_dev_type)) {
+               reg_base = pci_resource_start(ep->hba->pcidev,
+                                             BNX2X_DOORBELL_PCI_BAR);
+               reg_off = PAGE_SIZE * (cid_num & 0x1FFFF) + DPM_TRIGER_TYPE;
+               ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+               goto arm_cq;
+       }
+
+       reg_base = ep->hba->netdev->base_addr;
+       if ((test_bit(BNX2I_NX2_DEV_5709, &ep->hba->cnic_dev_type)) &&
+           (ep->hba->mail_queue_access == BNX2I_MQ_BIN_MODE)) {
+               config2 = REG_RD(ep->hba, BNX2_MQ_CONFIG2);
+               first_l4l5 = config2 & BNX2_MQ_CONFIG2_FIRST_L4L5;
+               ctx_sz = (config2 & BNX2_MQ_CONFIG2_CONT_SZ) >> 3;
+               if (ctx_sz)
+                       reg_off = CTX_OFFSET + MAX_CID_CNT * MB_KERNEL_CTX_SIZE
+                                 + PAGE_SIZE *
+                                 (((cid_num - first_l4l5) / ctx_sz) + 256);
+               else
+                       reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+       } else
+               /* 5709 device in normal mode and 5706/5708 devices */
+               reg_off = CTX_OFFSET + (MB_KERNEL_CTX_SIZE * cid_num);
+
+       ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off,
+                                         MB_KERNEL_CTX_SIZE);
+       if (!ep->qp.ctx_base)
+               return -ENOMEM;
+
+arm_cq:
+       bnx2i_arm_cq_event_coalescing(ep, CNIC_ARM_CQE);
+       return 0;
+}
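
/* Worked example for the 57710 branch above (illustrative values,
 * assuming PAGE_SIZE == 4096):
 *
 *   cid_num & 0x1FFFF   = 0x12345        (for cid_num = 0x12345)
 *   PAGE_SIZE * 0x12345 = 0x12345000
 *   reg_off             = 0x12345000 + DPM_TRIGER_TYPE
 *
 * i.e. each connection's doorbell lands in its own page of the
 * 57710's doorbell BAR, and only 4 bytes of it are mapped.
 */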
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
new file mode 100644 (file)
index 0000000..ae4b2d5
--- /dev/null
@@ -0,0 +1,438 @@
+/* bnx2i.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
+static u32 adapter_count;
+static int bnx2i_reg_device;
+
+#define DRV_MODULE_NAME                "bnx2i"
+#define DRV_MODULE_VERSION     "2.0.1d"
+#define DRV_MODULE_RELDATE     "Mar 25, 2009"
+
+static char version[] __devinitdata =
+               "Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
+               " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+
+
+MODULE_AUTHOR("Anil Veerabhadrappa <anilgv@broadcom.com>");
+MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709 iSCSI Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_MODULE_VERSION);
+
+static DEFINE_RWLOCK(bnx2i_dev_lock);
+
+unsigned int event_coal_div = 1;
+module_param(event_coal_div, int, 0664);
+MODULE_PARM_DESC(event_coal_div, "Event Coalescing Divide Factor");
+
+unsigned int en_tcp_dack = 1;
+module_param(en_tcp_dack, int, 0664);
+MODULE_PARM_DESC(en_tcp_dack, "Enable TCP Delayed ACK");
+
+unsigned int error_mask1 = 0x00;
+module_param(error_mask1, int, 0664);
+MODULE_PARM_DESC(error_mask1, "Config FW iSCSI Error Mask #1");
+
+unsigned int error_mask2 = 0x00;
+module_param(error_mask2, int, 0664);
+MODULE_PARM_DESC(error_mask2, "Config FW iSCSI Error Mask #2");
+
+unsigned int sq_size;
+module_param(sq_size, int, 0664);
+MODULE_PARM_DESC(sq_size, "Configure SQ size");
+
+unsigned int rq_size = BNX2I_RQ_WQES_DEFAULT;
+module_param(rq_size, int, 0664);
+MODULE_PARM_DESC(rq_size, "Configure RQ size");
+
+u64 iscsi_error_mask = 0x00;
+
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba);
+
+
+/**
+ * bnx2i_identify_device - identifies NetXtreme II device type
+ * @hba:               Adapter structure pointer
+ *
+ * This function identifies the NX2 device type and sets the appropriate
+ *     queue mailbox register access method; the 5709 requires the driver
+ *     to access MBOX registers using *bin* mode
+ */
+void bnx2i_identify_device(struct bnx2i_hba *hba)
+{
+       hba->cnic_dev_type = 0;
+       if ((hba->pci_did == PCI_DEVICE_ID_NX2_5706) ||
+           (hba->pci_did == PCI_DEVICE_ID_NX2_5706S))
+               set_bit(BNX2I_NX2_DEV_5706, &hba->cnic_dev_type);
+       else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5708) ||
+           (hba->pci_did == PCI_DEVICE_ID_NX2_5708S))
+               set_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type);
+       else if ((hba->pci_did == PCI_DEVICE_ID_NX2_5709) ||
+           (hba->pci_did == PCI_DEVICE_ID_NX2_5709S)) {
+               set_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type);
+               hba->mail_queue_access = BNX2I_MQ_BIN_MODE;
+       } else if (hba->pci_did == PCI_DEVICE_ID_NX2_57710 ||
+                  hba->pci_did == PCI_DEVICE_ID_NX2_57711)
+               set_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type);
+}
+
+
+/**
+ * get_adapter_list_head - returns the first usable adapter on the list
+ */
+struct bnx2i_hba *get_adapter_list_head(void)
+{
+       struct bnx2i_hba *hba = NULL;
+       struct bnx2i_hba *tmp_hba;
+
+       if (!adapter_count)
+               goto hba_not_found;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_entry(tmp_hba, &adapter_list, link) {
+               if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
+                       hba = tmp_hba;
+                       break;
+               }
+       }
+       read_unlock(&bnx2i_dev_lock);
+hba_not_found:
+       return hba;
+}
+
+
+/**
+ * bnx2i_find_hba_for_cnic - maps cnic device instance to bnx2i adapter instance
+ * @cnic:      pointer to cnic device instance
+ *
+ */
+struct bnx2i_hba *bnx2i_find_hba_for_cnic(struct cnic_dev *cnic)
+{
+       struct bnx2i_hba *hba, *temp;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_entry_safe(hba, temp, &adapter_list, link) {
+               if (hba->cnic == cnic) {
+                       read_unlock(&bnx2i_dev_lock);
+                       return hba;
+               }
+       }
+       read_unlock(&bnx2i_dev_lock);
+       return NULL;
+}
+
+
+/**
+ * bnx2i_start - cnic callback to initialize & start adapter instance
+ * @handle:    transparent handle pointing to adapter structure
+ *
+ * This function maps the adapter structure to the pcidev structure and
+ *     initiates the firmware handshake to enable/initialize on-chip iscsi
+ *     components. This bnx2i - cnic interface API callback is issued only
+ *     after the following two conditions are met -
+ *       a) the underlying network interface is up (marked by the
+ *             'NETDEV_UP' event from netdev), and
+ *       b) the bnx2i adapter instance is registered
+ */
+void bnx2i_start(void *handle)
+{
+#define BNX2I_INIT_POLL_TIME   (1000 / HZ)
+       struct bnx2i_hba *hba = handle;
+       int i = HZ;
+
+       bnx2i_send_fw_iscsi_init_msg(hba);
+       while (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state) && i--)
+               msleep(BNX2I_INIT_POLL_TIME);
+}
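
/* Timing note (illustrative): the poll loop above bounds the wait at
 * roughly one second -- 'i' starts at HZ and each pass sleeps
 * BNX2I_INIT_POLL_TIME = 1000/HZ milliseconds, so the worst case is
 * HZ * (1000/HZ) ms ~= 1000 ms before bnx2i_start() returns without
 * seeing ADAPTER_STATE_UP.
 */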
+
+
+/**
+ * bnx2i_stop - cnic callback to shutdown adapter instance
+ * @handle:    transparent handle pointing to adapter structure
+ *
+ * driver checks whether the adapter is already in shutdown mode; if not,
+ *     it starts the shutdown process
+ */
+void bnx2i_stop(void *handle)
+{
+       struct bnx2i_hba *hba = handle;
+
+       /* check if cleanup happened in GOING_DOWN context */
+       clear_bit(ADAPTER_STATE_UP, &hba->adapter_state);
+       if (!test_and_clear_bit(ADAPTER_STATE_GOING_DOWN,
+                               &hba->adapter_state))
+               iscsi_host_for_each_session(hba->shost,
+                                           bnx2i_drop_session);
+}
+
+/**
+ * bnx2i_register_device - register bnx2i adapter instance with the cnic driver
+ * @hba:       Adapter instance to register
+ *
+ * registers bnx2i adapter instance with the cnic driver while holding the
+ *     adapter structure lock
+ */
+void bnx2i_register_device(struct bnx2i_hba *hba)
+{
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+           test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               return;
+       }
+
+       hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
+
+       spin_lock(&hba->lock);
+       bnx2i_reg_device++;
+       spin_unlock(&hba->lock);
+
+       set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+
+/**
+ * bnx2i_reg_dev_all - registers all adapter instances with the cnic driver
+ *
+ * registers all bnx2i adapter instances with the cnic driver while holding
+ *     the global resource lock
+ */
+void bnx2i_reg_dev_all(void)
+{
+       struct bnx2i_hba *hba, *temp;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_entry_safe(hba, temp, &adapter_list, link)
+               bnx2i_register_device(hba);
+       read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_unreg_one_device - unregister adapter instance with the cnic driver
+ * @hba:       Adapter instance to unregister
+ *
+ * unregisters the bnx2i adapter instance from the cnic driver while
+ *     holding the adapter structure lock
+ */
+static void bnx2i_unreg_one_device(struct bnx2i_hba *hba)
+{
+       if (hba->ofld_conns_active ||
+           !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic) ||
+           test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state))
+               return;
+
+       hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+
+       spin_lock(&hba->lock);
+       bnx2i_reg_device--;
+       spin_unlock(&hba->lock);
+
+       /* ep_disconnect could come before NETDEV_DOWN, driver won't
+        * see NETDEV_DOWN as it already unregistered itself.
+        */
+       hba->adapter_state = 0;
+       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+}
+
+/**
+ * bnx2i_unreg_dev_all - unregisters all bnx2i instances with the cnic driver
+ *
+ * unregisters all bnx2i adapter instances with the cnic driver while holding
+ *     the global resource lock
+ */
+void bnx2i_unreg_dev_all(void)
+{
+       struct bnx2i_hba *hba, *temp;
+
+       read_lock(&bnx2i_dev_lock);
+       list_for_each_entry_safe(hba, temp, &adapter_list, link)
+               bnx2i_unreg_one_device(hba);
+       read_unlock(&bnx2i_dev_lock);
+}
+
+
+/**
+ * bnx2i_init_one - initialize an adapter instance and allocate memory resources
+ * @hba:       bnx2i adapter instance
+ * @cnic:      cnic device handle
+ *
+ * The global resource lock and host adapter lock are held during the
+ *     critical sections below. This routine is called from
+ *     cnic_register_driver() context and from the workhorse thread which
+ *     does the majority of device specific initialization
+ */
+static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
+{
+       int rc;
+
+       read_lock(&bnx2i_dev_lock);
+       if (bnx2i_reg_device &&
+           !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+               if (rc)         /* duplicate registration */
+                       printk(KERN_ERR "bnx2i- dev reg failed\n");
+
+               spin_lock(&hba->lock);
+               bnx2i_reg_device++;
+               hba->age++;
+               spin_unlock(&hba->lock);
+
+               set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+       }
+       read_unlock(&bnx2i_dev_lock);
+
+       write_lock(&bnx2i_dev_lock);
+       list_add_tail(&hba->link, &adapter_list);
+       adapter_count++;
+       write_unlock(&bnx2i_dev_lock);
+       return 0;
+}
+
+
+/**
+ * bnx2i_ulp_init - initialize an adapter instance
+ * @dev:       cnic device handle
+ *
+ * Called from cnic_register_driver() context to initialize all enumerated
+ *     cnic devices. This routine allocates the adapter structure and other
+ *     device specific resources.
+ */
+void bnx2i_ulp_init(struct cnic_dev *dev)
+{
+       struct bnx2i_hba *hba;
+
+       /* Allocate a HBA structure for this device */
+       hba = bnx2i_alloc_hba(dev);
+       if (!hba) {
+               printk(KERN_ERR "bnx2i init: hba initialization failed\n");
+               return;
+       }
+
+       /* Get PCI related information and update hba struct members */
+       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+       if (bnx2i_init_one(hba, dev)) {
+               printk(KERN_ERR "bnx2i - hba %p init failed\n", hba);
+               bnx2i_free_hba(hba);
+       } else
+               hba->cnic = dev;
+}
+
+
+/**
+ * bnx2i_ulp_exit - shuts down adapter instance and frees all resources
+ * @dev:       cnic device handle
+ *
+ */
+void bnx2i_ulp_exit(struct cnic_dev *dev)
+{
+       struct bnx2i_hba *hba;
+
+       hba = bnx2i_find_hba_for_cnic(dev);
+       if (!hba) {
+               printk(KERN_INFO "bnx2i_ulp_exit: hba not "
+                                "found, dev 0x%p\n", dev);
+               return;
+       }
+       write_lock(&bnx2i_dev_lock);
+       list_del_init(&hba->link);
+       adapter_count--;
+
+       if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+               clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+
+               spin_lock(&hba->lock);
+               bnx2i_reg_device--;
+               spin_unlock(&hba->lock);
+       }
+       write_unlock(&bnx2i_dev_lock);
+
+       bnx2i_free_hba(hba);
+}
+
+
+/**
+ * bnx2i_mod_init - module init entry point
+ *
+ * initializes driver-wide global data structures such as the endpoint
+ *     pool, tcp port manager/queue and sysfs. Finally the driver registers
+ *     itself with the cnic module
+ */
+static int __init bnx2i_mod_init(void)
+{
+       int err;
+
+       printk(KERN_INFO "%s", version);
+
+       if (sq_size && !is_power_of_2(sq_size))
+               sq_size = roundup_pow_of_two(sq_size);
+
+       bnx2i_scsi_xport_template =
+                       iscsi_register_transport(&bnx2i_iscsi_transport);
+       if (!bnx2i_scsi_xport_template) {
+               printk(KERN_ERR "Could not register bnx2i transport.\n");
+               err = -ENOMEM;
+               goto out;
+       }
+
+       err = cnic_register_driver(CNIC_ULP_ISCSI, &bnx2i_cnic_cb);
+       if (err) {
+               printk(KERN_ERR "Could not register bnx2i cnic driver.\n");
+               goto unreg_xport;
+       }
+
+       return 0;
+
+unreg_xport:
+       iscsi_unregister_transport(&bnx2i_iscsi_transport);
+out:
+       return err;
+}
+
+
+/**
+ * bnx2i_mod_exit - module cleanup/exit entry point
+ *
+ * The global resource lock and host adapter lock are held during the
+ *     critical sections in this function. The driver walks the adapter
+ *     list, cleans up each instance, unregisters the iscsi transport name
+ *     and finally unregisters itself with the cnic module
+ */
+static void __exit bnx2i_mod_exit(void)
+{
+       struct bnx2i_hba *hba;
+
+       write_lock(&bnx2i_dev_lock);
+       while (!list_empty(&adapter_list)) {
+               hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
+               list_del(&hba->link);
+               adapter_count--;
+
+               if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+                       hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
+                       clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+                       bnx2i_reg_device--;
+               }
+
+               write_unlock(&bnx2i_dev_lock);
+               bnx2i_free_hba(hba);
+               write_lock(&bnx2i_dev_lock);
+       }
+       write_unlock(&bnx2i_dev_lock);
+
+       iscsi_unregister_transport(&bnx2i_iscsi_transport);
+       cnic_unregister_driver(CNIC_ULP_ISCSI);
+}
+
+module_init(bnx2i_mod_init);
+module_exit(bnx2i_mod_exit);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
new file mode 100644 (file)
index 0000000..f741219
--- /dev/null
@@ -0,0 +1,2064 @@
+/*
+ * bnx2i_iscsi.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2006 - 2009 Broadcom Corporation
+ * Copyright (c) 2007, 2008 Red Hat, Inc.  All rights reserved.
+ * Copyright (c) 2007, 2008 Mike Christie
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include <scsi/scsi_tcq.h>
+#include <scsi/libiscsi.h>
+#include "bnx2i.h"
+
+struct scsi_transport_template *bnx2i_scsi_xport_template;
+struct iscsi_transport bnx2i_iscsi_transport;
+static struct scsi_host_template bnx2i_host_template;
+
+/*
+ * Global endpoint resource info
+ */
+static DEFINE_SPINLOCK(bnx2i_resc_lock); /* protects global resources */
+
+
+static int bnx2i_adapter_ready(struct bnx2i_hba *hba)
+{
+       int retval = 0;
+
+       if (!hba || !test_bit(ADAPTER_STATE_UP, &hba->adapter_state) ||
+           test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
+           test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+               retval = -EPERM;
+       return retval;
+}
+
+/**
+ * bnx2i_get_write_cmd_bd_idx - identifies various BD bookmarks
+ * @cmd:               iscsi cmd struct pointer
+ * @buf_off:           absolute buffer offset
+ * @start_bd_off:      u32 pointer to return the offset within the BD
+ *                     indicated by 'start_bd_idx' on which 'buf_off' falls
+ * @start_bd_idx:      index of the BD on which 'buf_off' falls
+ *
+ * identifies & marks various bd info for scsi command's imm data,
+ * unsolicited data and the first solicited data seq.
+ */
+static void bnx2i_get_write_cmd_bd_idx(struct bnx2i_cmd *cmd, u32 buf_off,
+                                      u32 *start_bd_off, u32 *start_bd_idx)
+{
+       struct iscsi_bd *bd_tbl = cmd->io_tbl.bd_tbl;
+       u32 cur_offset = 0;
+       u32 cur_bd_idx = 0;
+
+       if (buf_off) {
+               while (buf_off >= (cur_offset + bd_tbl->buffer_length)) {
+                       cur_offset += bd_tbl->buffer_length;
+                       cur_bd_idx++;
+                       bd_tbl++;
+               }
+       }
+
+       *start_bd_off = buf_off - cur_offset;
+       *start_bd_idx = cur_bd_idx;
+}
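
/* Worked example for bnx2i_get_write_cmd_bd_idx() (illustrative BD
 * lengths): with three BDs of 4096, 4096 and 2048 bytes and
 * buf_off = 5000,
 *
 *   iteration 1: 5000 >= 4096        -> cur_offset = 4096, idx = 1
 *   iteration 2: 5000 <  4096 + 4096 -> loop stops
 *
 *   *start_bd_idx = 1, *start_bd_off = 5000 - 4096 = 904
 *
 * i.e. byte 5000 of the I/O buffer is byte 904 of the second BD.
 */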
+
+/**
+ * bnx2i_setup_write_cmd_bd_info - sets up BD various information
+ * @task:      transport layer's cmd struct pointer
+ *
+ * identifies & marks various bd info for scsi command's immediate data,
+ * unsolicited data and first solicited data seq, which includes the BD
+ * start index & BD buffer offset. This function takes into account iscsi
+ * parameters such as whether immediate data and unsolicited data are
+ * supported on this connection.
+ */
+static void bnx2i_setup_write_cmd_bd_info(struct iscsi_task *task)
+{
+       struct bnx2i_cmd *cmd = task->dd_data;
+       u32 start_bd_offset;
+       u32 start_bd_idx;
+       u32 buffer_offset = 0;
+       u32 cmd_len = cmd->req.total_data_transfer_length;
+
+       /* if ImmediateData is turned off & InitialR2T is turned on,
+        * there will be no immediate or unsolicited data, just return.
+        */
+       if (!iscsi_task_has_unsol_data(task) && !task->imm_count)
+               return;
+
+       /* Immediate data */
+       buffer_offset += task->imm_count;
+       if (task->imm_count == cmd_len)
+               return;
+
+       if (iscsi_task_has_unsol_data(task)) {
+               bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+                                          &start_bd_offset, &start_bd_idx);
+               cmd->req.ud_buffer_offset = start_bd_offset;
+               cmd->req.ud_start_bd_index = start_bd_idx;
+               buffer_offset += task->unsol_r2t.data_length;
+       }
+
+       if (buffer_offset != cmd_len) {
+               bnx2i_get_write_cmd_bd_idx(cmd, buffer_offset,
+                                          &start_bd_offset, &start_bd_idx);
+               if ((start_bd_offset > task->conn->session->first_burst) ||
+                   (start_bd_idx > scsi_sg_count(cmd->scsi_cmd))) {
+                       int i = 0;
+
+                       iscsi_conn_printk(KERN_ALERT, task->conn,
+                                         "bnx2i- error, buf offset 0x%x "
+                                         "bd_valid %d use_sg %d\n",
+                                         buffer_offset, cmd->io_tbl.bd_valid,
+                                         scsi_sg_count(cmd->scsi_cmd));
+                       for (i = 0; i < cmd->io_tbl.bd_valid; i++)
+                               iscsi_conn_printk(KERN_ALERT, task->conn,
+                                                 "bnx2i err, bd[%d]: len %x\n",
+                                                 i, cmd->io_tbl.bd_tbl[i].\
+                                                 buffer_length);
+               }
+               cmd->req.sd_buffer_offset = start_bd_offset;
+               cmd->req.sd_start_bd_index = start_bd_idx;
+       }
+}
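
/* Worked example for bnx2i_setup_write_cmd_bd_info() (illustrative
 * values): for a 16 KiB WRITE with imm_count = 2048 and an
 * unsolicited burst of 6144 bytes,
 *
 *   immediate data    covers bytes [0, 2048)
 *   unsolicited data  starts at offset 2048  -> ud_* bookmarks
 *   solicited data    starts at offset 8192  -> sd_* bookmarks
 *
 * Each bookmark is resolved to a (BD index, offset-within-BD) pair by
 * bnx2i_get_write_cmd_bd_idx() above.
 */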
+
+
+
+/**
+ * bnx2i_map_scsi_sg - maps IO buffer and prepares the BD table
+ * @hba:       adapter instance
+ * @cmd:       iscsi cmd struct pointer
+ *
+ * map SG list
+ */
+static int bnx2i_map_scsi_sg(struct bnx2i_hba *hba, struct bnx2i_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+       struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+       struct scatterlist *sg;
+       int byte_count = 0;
+       int bd_count = 0;
+       int sg_count;
+       int sg_len;
+       u64 addr;
+       int i;
+
+       BUG_ON(scsi_sg_count(sc) > ISCSI_MAX_BDS_PER_CMD);
+
+       sg_count = scsi_dma_map(sc);
+
+       scsi_for_each_sg(sc, sg, sg_count, i) {
+               sg_len = sg_dma_len(sg);
+               addr = (u64) sg_dma_address(sg);
+               bd[bd_count].buffer_addr_lo = addr & 0xffffffff;
+               bd[bd_count].buffer_addr_hi = addr >> 32;
+               bd[bd_count].buffer_length = sg_len;
+               bd[bd_count].flags = 0;
+               if (bd_count == 0)
+                       bd[bd_count].flags = ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+               byte_count += sg_len;
+               bd_count++;
+       }
+
+       if (bd_count)
+               bd[bd_count - 1].flags |= ISCSI_BD_LAST_IN_BD_CHAIN;
+
+       BUG_ON(byte_count != scsi_bufflen(sc));
+       return bd_count;
+}
+
+/**
+ * bnx2i_iscsi_map_sg_list - maps SG list
+ * @cmd:       iscsi cmd struct pointer
+ *
+ * creates BD list table for the command
+ */
+static void bnx2i_iscsi_map_sg_list(struct bnx2i_cmd *cmd)
+{
+       int bd_count;
+
+       bd_count  = bnx2i_map_scsi_sg(cmd->conn->hba, cmd);
+       if (!bd_count) {
+               struct iscsi_bd *bd = cmd->io_tbl.bd_tbl;
+
+               bd[0].buffer_addr_lo = bd[0].buffer_addr_hi = 0;
+               bd[0].buffer_length = bd[0].flags = 0;
+       }
+       cmd->io_tbl.bd_valid = bd_count;
+}
+
+
+/**
+ * bnx2i_iscsi_unmap_sg_list - unmaps SG list
+ * @cmd:       iscsi cmd struct pointer
+ *
+ * unmap IO buffers and invalidate the BD table
+ */
+void bnx2i_iscsi_unmap_sg_list(struct bnx2i_cmd *cmd)
+{
+       struct scsi_cmnd *sc = cmd->scsi_cmd;
+
+       if (cmd->io_tbl.bd_valid && sc) {
+               scsi_dma_unmap(sc);
+               cmd->io_tbl.bd_valid = 0;
+       }
+}
+
+static void bnx2i_setup_cmd_wqe_template(struct bnx2i_cmd *cmd)
+{
+       memset(&cmd->req, 0x00, sizeof(cmd->req));
+       cmd->req.op_code = 0xFF;
+       cmd->req.bd_list_addr_lo = (u32) cmd->io_tbl.bd_tbl_dma;
+       cmd->req.bd_list_addr_hi =
+               (u32) ((u64) cmd->io_tbl.bd_tbl_dma >> 32);
+
+}
+
+
+/**
+ * bnx2i_bind_conn_to_iscsi_cid - bind conn structure to 'iscsi_cid'
+ * @hba:       pointer to adapter instance
+ * @bnx2i_conn:        pointer to bnx2i connection structure
+ * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
+ *
+ * updates the iscsi cid table entry with the connection pointer. This
+ *     enables the driver to quickly get hold of the connection structure
+ *     pointer in the completion/interrupt thread using the iscsi context ID
+ */
+static int bnx2i_bind_conn_to_iscsi_cid(struct bnx2i_hba *hba,
+                                       struct bnx2i_conn *bnx2i_conn,
+                                       u32 iscsi_cid)
+{
+       if (hba && hba->cid_que.conn_cid_tbl[iscsi_cid]) {
+               iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+                                "conn bind - entry #%d not free\n", iscsi_cid);
+               return -EBUSY;
+       }
+
+       hba->cid_que.conn_cid_tbl[iscsi_cid] = bnx2i_conn;
+       return 0;
+}
+
+
+/**
+ * bnx2i_get_conn_from_id - maps an iscsi cid to corresponding conn ptr
+ * @hba:       pointer to adapter instance
+ * @iscsi_cid: iscsi context ID, range 0 - (MAX_CONN - 1)
+ */
+struct bnx2i_conn *bnx2i_get_conn_from_id(struct bnx2i_hba *hba,
+                                         u16 iscsi_cid)
+{
+       if (!hba->cid_que.conn_cid_tbl) {
+               printk(KERN_ERR "bnx2i: ERROR - missing conn<->cid table\n");
+               return NULL;
+
+       } else if (iscsi_cid >= hba->max_active_conns) {
+               printk(KERN_ERR "bnx2i: wrong cid #%d\n", iscsi_cid);
+               return NULL;
+       }
+       return hba->cid_que.conn_cid_tbl[iscsi_cid];
+}
+
+
+/**
+ * bnx2i_alloc_iscsi_cid - allocates an iscsi_cid from the free pool
+ * @hba:       pointer to adapter instance
+ */
+static u32 bnx2i_alloc_iscsi_cid(struct bnx2i_hba *hba)
+{
+       int idx;
+
+       if (!hba->cid_que.cid_free_cnt)
+               return -1;
+
+       idx = hba->cid_que.cid_q_cons_idx;
+       hba->cid_que.cid_q_cons_idx++;
+       if (hba->cid_que.cid_q_cons_idx == hba->cid_que.cid_q_max_idx)
+               hba->cid_que.cid_q_cons_idx = 0;
+
+       hba->cid_que.cid_free_cnt--;
+       return hba->cid_que.cid_que[idx];
+}
+
+
+/**
+ * bnx2i_free_iscsi_cid - returns an iscsi_cid to the free pool
+ * @hba:               pointer to adapter instance
+ * @iscsi_cid:         iscsi context ID to free
+ */
+static void bnx2i_free_iscsi_cid(struct bnx2i_hba *hba, u16 iscsi_cid)
+{
+       int idx;
+
+       if (iscsi_cid == (u16) -1)
+               return;
+
+       hba->cid_que.cid_free_cnt++;
+
+       idx = hba->cid_que.cid_q_prod_idx;
+       hba->cid_que.cid_que[idx] = iscsi_cid;
+       hba->cid_que.conn_cid_tbl[iscsi_cid] = NULL;
+       hba->cid_que.cid_q_prod_idx++;
+       if (hba->cid_que.cid_q_prod_idx == hba->cid_que.cid_q_max_idx)
+               hba->cid_que.cid_q_prod_idx = 0;
+}
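
/* Design note: bnx2i_alloc_iscsi_cid() and bnx2i_free_iscsi_cid()
 * above treat cid_que[] as a circular FIFO of free IDs --
 * cid_q_cons_idx chases cid_q_prod_idx around a ring of cid_q_max_idx
 * slots while cid_free_cnt tracks the population.  Neither helper
 * takes a lock, so callers are presumably expected to serialize
 * access themselves.
 */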
+
+
+/**
+ * bnx2i_setup_free_cid_que - sets up free iscsi cid queue
+ * @hba:       pointer to adapter instance
+ *
+ * allocates memory for iscsi cid queue & 'cid - conn ptr' mapping table,
+ *     and initializes table attributes
+ */
+static int bnx2i_setup_free_cid_que(struct bnx2i_hba *hba)
+{
+       int mem_size;
+       int i;
+
+       mem_size = hba->max_active_conns * sizeof(u32);
+       mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+
+       hba->cid_que.cid_que_base = kmalloc(mem_size, GFP_KERNEL);
+       if (!hba->cid_que.cid_que_base)
+               return -ENOMEM;
+
+       mem_size = hba->max_active_conns * sizeof(struct bnx2i_conn *);
+       mem_size = (mem_size + (PAGE_SIZE - 1)) & PAGE_MASK;
+       hba->cid_que.conn_cid_tbl = kmalloc(mem_size, GFP_KERNEL);
+       if (!hba->cid_que.conn_cid_tbl) {
+               kfree(hba->cid_que.cid_que_base);
+               hba->cid_que.cid_que_base = NULL;
+               return -ENOMEM;
+       }
+
+       hba->cid_que.cid_que = (u32 *)hba->cid_que.cid_que_base;
+       hba->cid_que.cid_q_prod_idx = 0;
+       hba->cid_que.cid_q_cons_idx = 0;
+       hba->cid_que.cid_q_max_idx = hba->max_active_conns;
+       hba->cid_que.cid_free_cnt = hba->max_active_conns;
+
+       for (i = 0; i < hba->max_active_conns; i++) {
+               hba->cid_que.cid_que[i] = i;
+               hba->cid_que.conn_cid_tbl[i] = NULL;
+       }
+       return 0;
+}
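
/* Note: the '(size + PAGE_SIZE - 1) & PAGE_MASK' arithmetic above is
 * the open-coded equivalent of PAGE_ALIGN(size); both tables are
 * rounded up to whole pages before being kmalloc()'d.
 */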
+
+
+/**
+ * bnx2i_release_free_cid_que - releases 'iscsi_cid' queue resources
+ * @hba:       pointer to adapter instance
+ */
+static void bnx2i_release_free_cid_que(struct bnx2i_hba *hba)
+{
+       kfree(hba->cid_que.cid_que_base);
+       hba->cid_que.cid_que_base = NULL;
+
+       kfree(hba->cid_que.conn_cid_tbl);
+       hba->cid_que.conn_cid_tbl = NULL;
+}
+
+
+/**
+ * bnx2i_alloc_ep - allocates ep structure from global pool
+ * @hba:       pointer to adapter instance
+ *
+ * routine allocates a free endpoint structure from the global pool and
+ *     initializes it for this connection; the adapter's active offload
+ *     connection count is bumped here
+ */
+static struct iscsi_endpoint *bnx2i_alloc_ep(struct bnx2i_hba *hba)
+{
+       struct iscsi_endpoint *ep;
+       struct bnx2i_endpoint *bnx2i_ep;
+
+       ep = iscsi_create_endpoint(sizeof(*bnx2i_ep));
+       if (!ep) {
+               printk(KERN_ERR "bnx2i: Could not allocate ep\n");
+               return NULL;
+       }
+
+       bnx2i_ep = ep->dd_data;
+       INIT_LIST_HEAD(&bnx2i_ep->link);
+       bnx2i_ep->state = EP_STATE_IDLE;
+       bnx2i_ep->hba = hba;
+       bnx2i_ep->hba_age = hba->age;
+       hba->ofld_conns_active++;
+       init_waitqueue_head(&bnx2i_ep->ofld_wait);
+       return ep;
+}
+
+
+/**
+ * bnx2i_free_ep - free endpoint
+ * @ep:                pointer to iscsi endpoint structure
+ */
+static void bnx2i_free_ep(struct iscsi_endpoint *ep)
+{
+       struct bnx2i_endpoint *bnx2i_ep = ep->dd_data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&bnx2i_resc_lock, flags);
+       bnx2i_ep->state = EP_STATE_IDLE;
+       bnx2i_ep->hba->ofld_conns_active--;
+
+       bnx2i_free_iscsi_cid(bnx2i_ep->hba, bnx2i_ep->ep_iscsi_cid);
+       if (bnx2i_ep->conn) {
+               bnx2i_ep->conn->ep = NULL;
+               bnx2i_ep->conn = NULL;
+       }
+
+       bnx2i_ep->hba = NULL;
+       spin_unlock_irqrestore(&bnx2i_resc_lock, flags);
+       iscsi_destroy_endpoint(ep);
+}
+
+
+/**
+ * bnx2i_alloc_bdt - allocates buffer descriptor (BD) table for the command
+ * @hba:       adapter instance pointer
+ * @session:   iscsi session pointer
+ * @cmd:       iscsi command structure
+ */
+static int bnx2i_alloc_bdt(struct bnx2i_hba *hba, struct iscsi_session *session,
+                          struct bnx2i_cmd *cmd)
+{
+       struct io_bdt *io = &cmd->io_tbl;
+       struct iscsi_bd *bd;
+
+       io->bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
+                                       ISCSI_MAX_BDS_PER_CMD * sizeof(*bd),
+                                       &io->bd_tbl_dma, GFP_KERNEL);
+       if (!io->bd_tbl) {
+               iscsi_session_printk(KERN_ERR, session, "Could not "
+                                    "allocate bdt.\n");
+               return -ENOMEM;
+       }
+       io->bd_valid = 0;
+       return 0;
+}
+
+/**
+ * bnx2i_destroy_cmd_pool - destroys iscsi command pool and releases BD tables
+ * @hba:       adapter instance pointer
+ * @session:   iscsi session pointer
+ */
+static void bnx2i_destroy_cmd_pool(struct bnx2i_hba *hba,
+                                  struct iscsi_session *session)
+{
+       int i;
+
+       for (i = 0; i < session->cmds_max; i++) {
+               struct iscsi_task *task = session->cmds[i];
+               struct bnx2i_cmd *cmd = task->dd_data;
+
+               if (cmd->io_tbl.bd_tbl)
+                       dma_free_coherent(&hba->pcidev->dev,
+                                         ISCSI_MAX_BDS_PER_CMD *
+                                         sizeof(struct iscsi_bd),
+                                         cmd->io_tbl.bd_tbl,
+                                         cmd->io_tbl.bd_tbl_dma);
+       }
+
+}
+
+
+/**
+ * bnx2i_setup_cmd_pool - sets up iscsi command pool for the session
+ * @hba:       adapter instance pointer
+ * @session:   iscsi session pointer
+ */
+static int bnx2i_setup_cmd_pool(struct bnx2i_hba *hba,
+                               struct iscsi_session *session)
+{
+       int i;
+
+       for (i = 0; i < session->cmds_max; i++) {
+               struct iscsi_task *task = session->cmds[i];
+               struct bnx2i_cmd *cmd = task->dd_data;
+
+               task->hdr = &cmd->hdr;
+               task->hdr_max = sizeof(struct iscsi_hdr);
+
+               if (bnx2i_alloc_bdt(hba, session, cmd))
+                       goto free_bdts;
+       }
+
+       return 0;
+
+free_bdts:
+       bnx2i_destroy_cmd_pool(hba, session);
+       return -ENOMEM;
+}
+
+
+/**
+ * bnx2i_setup_mp_bdt - allocate BD table resources
+ * @hba:       pointer to adapter structure
+ *
+ * Allocate memory for dummy buffer and associated BD
+ * table to be used by middle path (MP) requests
+ */
+static int bnx2i_setup_mp_bdt(struct bnx2i_hba *hba)
+{
+       int rc = 0;
+       struct iscsi_bd *mp_bdt;
+       u64 addr;
+
+       hba->mp_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                           &hba->mp_bd_dma, GFP_KERNEL);
+       if (!hba->mp_bd_tbl) {
+               printk(KERN_ERR "unable to allocate Middle Path BDT\n");
+               rc = -1;
+               goto out;
+       }
+
+       hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                              &hba->dummy_buf_dma, GFP_KERNEL);
+       if (!hba->dummy_buffer) {
+               printk(KERN_ERR "unable to alloc Middle Path Dummy Buffer\n");
+               dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                 hba->mp_bd_tbl, hba->mp_bd_dma);
+               hba->mp_bd_tbl = NULL;
+               rc = -1;
+               goto out;
+       }
+
+       mp_bdt = (struct iscsi_bd *) hba->mp_bd_tbl;
+       addr = (unsigned long) hba->dummy_buf_dma;
+       mp_bdt->buffer_addr_lo = addr & 0xffffffff;
+       mp_bdt->buffer_addr_hi = addr >> 32;
+       mp_bdt->buffer_length = PAGE_SIZE;
+       mp_bdt->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+out:
+       return rc;
+}
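
/* Note: the dummy buffer and BD table allocated here also back
 * commands that carry no data -- bnx2i_task_xmit() below points
 * cmd->req at hba->mp_bd_dma whenever a command has no valid BDs of
 * its own.
 */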
+
+
+/**
+ * bnx2i_free_mp_bdt - frees middle path (MP) dummy buffer and BD table
+ * @hba:       pointer to adapter instance
+ *
+ * free MP dummy buffer and associated BD table
+ */
+static void bnx2i_free_mp_bdt(struct bnx2i_hba *hba)
+{
+       if (hba->mp_bd_tbl) {
+               dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                 hba->mp_bd_tbl, hba->mp_bd_dma);
+               hba->mp_bd_tbl = NULL;
+       }
+       if (hba->dummy_buffer) {
+               dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                 hba->dummy_buffer, hba->dummy_buf_dma);
+               hba->dummy_buffer = NULL;
+       }
+}
+
+/**
+ * bnx2i_drop_session - notifies iscsid of connection error.
+ * @cls_session:       iscsi cls session pointer
+ *
+ * This notifies iscsid that there is an error, so it can initiate
+ * recovery.
+ *
+ * This relies on the caller using the iscsi class iterator so the object
+ * is refcounted and does not disappear from under us.
+ */
+void bnx2i_drop_session(struct iscsi_cls_session *cls_session)
+{
+       iscsi_session_failure(cls_session->dd_data, ISCSI_ERR_CONN_FAILED);
+}
+
+/**
+ * bnx2i_ep_destroy_list_add - add an entry to EP destroy list
+ * @hba:       pointer to adapter instance
+ * @ep:                pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_add(struct bnx2i_hba *hba,
+                                    struct bnx2i_endpoint *ep)
+{
+       write_lock_bh(&hba->ep_rdwr_lock);
+       list_add_tail(&ep->link, &hba->ep_destroy_list);
+       write_unlock_bh(&hba->ep_rdwr_lock);
+       return 0;
+}
+
+/**
+ * bnx2i_ep_destroy_list_del - removes an entry from the EP destroy list
+ *
+ * @hba:               pointer to adapter instance
+ * @ep:                pointer to endpoint (transport identifier) structure
+ *
+ * EP destroy queue manager
+ */
+static int bnx2i_ep_destroy_list_del(struct bnx2i_hba *hba,
+                                    struct bnx2i_endpoint *ep)
+{
+       write_lock_bh(&hba->ep_rdwr_lock);
+       list_del_init(&ep->link);
+       write_unlock_bh(&hba->ep_rdwr_lock);
+
+       return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_add - add an entry to ep offload pending list
+ * @hba:       pointer to adapter instance
+ * @ep:                pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_add(struct bnx2i_hba *hba,
+                                 struct bnx2i_endpoint *ep)
+{
+       write_lock_bh(&hba->ep_rdwr_lock);
+       list_add_tail(&ep->link, &hba->ep_ofld_list);
+       write_unlock_bh(&hba->ep_rdwr_lock);
+       return 0;
+}
+
+/**
+ * bnx2i_ep_ofld_list_del - removes an entry from the ep offload pending list
+ * @hba:               pointer to adapter instance
+ * @ep:                pointer to endpoint (transport identifier) structure
+ *
+ * pending conn offload completion queue manager
+ */
+static int bnx2i_ep_ofld_list_del(struct bnx2i_hba *hba,
+                                 struct bnx2i_endpoint *ep)
+{
+       write_lock_bh(&hba->ep_rdwr_lock);
+       list_del_init(&ep->link);
+       write_unlock_bh(&hba->ep_rdwr_lock);
+       return 0;
+}
+
+
+/**
+ * bnx2i_find_ep_in_ofld_list - find iscsi_cid in pending list of endpoints
+ *
+ * @hba:               pointer to adapter instance
+ * @iscsi_cid:         iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_ofld_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+       struct list_head *list;
+       struct list_head *tmp;
+       struct bnx2i_endpoint *ep = NULL;
+
+       read_lock_bh(&hba->ep_rdwr_lock);
+       list_for_each_safe(list, tmp, &hba->ep_ofld_list) {
+               ep = (struct bnx2i_endpoint *)list;
+
+               if (ep->ep_iscsi_cid == iscsi_cid)
+                       break;
+               ep = NULL;
+       }
+       read_unlock_bh(&hba->ep_rdwr_lock);
+
+       if (!ep)
+               printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+       return ep;
+}
+
+
+/**
+ * bnx2i_find_ep_in_destroy_list - find iscsi_cid in destroy list
+ * @hba:               pointer to adapter instance
+ * @iscsi_cid:         iscsi context ID to find
+ *
+ */
+struct bnx2i_endpoint *
+bnx2i_find_ep_in_destroy_list(struct bnx2i_hba *hba, u32 iscsi_cid)
+{
+       struct list_head *list;
+       struct list_head *tmp;
+       struct bnx2i_endpoint *ep = NULL;
+
+       read_lock_bh(&hba->ep_rdwr_lock);
+       list_for_each_safe(list, tmp, &hba->ep_destroy_list) {
+               ep = (struct bnx2i_endpoint *)list;
+
+               if (ep->ep_iscsi_cid == iscsi_cid)
+                       break;
+               ep = NULL;
+       }
+       read_unlock_bh(&hba->ep_rdwr_lock);
+
+       if (!ep)
+               printk(KERN_ERR "l5 cid %d not found\n", iscsi_cid);
+
+       return ep;
+}
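
/* Note on the two lookups above: the cast '(struct bnx2i_endpoint *)
 * list' relies on 'link' being the first member of struct
 * bnx2i_endpoint.  A layout-independent sketch of the same loop
 * (example helper name, not part of this patch):
 */
static struct bnx2i_endpoint *
example_find_ep(struct list_head *head, u32 iscsi_cid)
{
        struct bnx2i_endpoint *ep;

        list_for_each_entry(ep, head, link)
                if (ep->ep_iscsi_cid == iscsi_cid)
                        return ep;
        return NULL;
}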
+
+/**
+ * bnx2i_setup_host_queue_size - assigns shost->can_queue param
+ * @hba:       pointer to adapter instance
+ * @shost:     scsi host pointer
+ *
+ * Initializes the 'can_queue' parameter based on how many outstanding
+ *     commands the device can handle; the 5708/5709/57710 devices each
+ *     have different capabilities
+ */
+static void bnx2i_setup_host_queue_size(struct bnx2i_hba *hba,
+                                       struct Scsi_Host *shost)
+{
+       if (test_bit(BNX2I_NX2_DEV_5708, &hba->cnic_dev_type))
+               shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+       else if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type))
+               shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5709;
+       else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+               shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_57710;
+       else
+               shost->can_queue = ISCSI_MAX_CMDS_PER_HBA_5708;
+}
+
+
+/**
+ * bnx2i_alloc_hba - allocate and init adapter instance
+ * @cnic:      cnic device pointer
+ *
+ * allocates & initializes the adapter structure and calls other
+ *     support routines to do per-adapter initialization
+ */
+struct bnx2i_hba *bnx2i_alloc_hba(struct cnic_dev *cnic)
+{
+       struct Scsi_Host *shost;
+       struct bnx2i_hba *hba;
+
+       shost = iscsi_host_alloc(&bnx2i_host_template, sizeof(*hba), 0);
+       if (!shost)
+               return NULL;
+       shost->dma_boundary = cnic->pcidev->dma_mask;
+       shost->transportt = bnx2i_scsi_xport_template;
+       shost->max_id = ISCSI_MAX_CONNS_PER_HBA;
+       shost->max_channel = 0;
+       shost->max_lun = 512;
+       shost->max_cmd_len = 16;
+
+       hba = iscsi_host_priv(shost);
+       hba->shost = shost;
+       hba->netdev = cnic->netdev;
+       /* Get PCI related information and update hba struct members */
+       hba->pcidev = cnic->pcidev;
+       pci_dev_get(hba->pcidev);
+       hba->pci_did = hba->pcidev->device;
+       hba->pci_vid = hba->pcidev->vendor;
+       hba->pci_sdid = hba->pcidev->subsystem_device;
+       hba->pci_svid = hba->pcidev->subsystem_vendor;
+       hba->pci_func = PCI_FUNC(hba->pcidev->devfn);
+       hba->pci_devno = PCI_SLOT(hba->pcidev->devfn);
+       bnx2i_identify_device(hba);
+
+       bnx2i_setup_host_queue_size(hba, shost);
+
+       if (test_bit(BNX2I_NX2_DEV_5709, &hba->cnic_dev_type)) {
+               hba->regview = ioremap_nocache(hba->netdev->base_addr,
+                                              BNX2_MQ_CONFIG2);
+               if (!hba->regview)
+                       goto ioreg_map_err;
+       } else if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+               hba->regview = ioremap_nocache(hba->netdev->base_addr, 4096);
+               if (!hba->regview)
+                       goto ioreg_map_err;
+       }
+
+       if (bnx2i_setup_mp_bdt(hba))
+               goto mp_bdt_mem_err;
+
+       INIT_LIST_HEAD(&hba->ep_ofld_list);
+       INIT_LIST_HEAD(&hba->ep_destroy_list);
+       rwlock_init(&hba->ep_rdwr_lock);
+
+       hba->mtu_supported = BNX2I_MAX_MTU_SUPPORTED;
+
+       /* different values for 5708/5709/57710 */
+       hba->max_active_conns = ISCSI_MAX_CONNS_PER_HBA;
+
+       if (bnx2i_setup_free_cid_que(hba))
+               goto cid_que_err;
+
+       /* SQ/RQ/CQ size can be changed via sysfs interface */
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+               if (sq_size && sq_size <= BNX2I_5770X_SQ_WQES_MAX)
+                       hba->max_sqes = sq_size;
+               else
+                       hba->max_sqes = BNX2I_5770X_SQ_WQES_DEFAULT;
+       } else {        /* 5706/5708/5709 */
+               if (sq_size && sq_size <= BNX2I_570X_SQ_WQES_MAX)
+                       hba->max_sqes = sq_size;
+               else
+                       hba->max_sqes = BNX2I_570X_SQ_WQES_DEFAULT;
+       }
+
+       hba->max_rqes = rq_size;
+       hba->max_cqes = hba->max_sqes + rq_size;
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type)) {
+               if (hba->max_cqes > BNX2I_5770X_CQ_WQES_MAX)
+                       hba->max_cqes = BNX2I_5770X_CQ_WQES_MAX;
+       } else if (hba->max_cqes > BNX2I_570X_CQ_WQES_MAX)
+               hba->max_cqes = BNX2I_570X_CQ_WQES_MAX;
+
+       hba->num_ccell = hba->max_sqes / 2;
+
+       spin_lock_init(&hba->lock);
+       mutex_init(&hba->net_dev_lock);
+
+       if (iscsi_host_add(shost, &hba->pcidev->dev))
+               goto free_dump_mem;
+       return hba;
+
+free_dump_mem:
+       bnx2i_release_free_cid_que(hba);
+cid_que_err:
+       bnx2i_free_mp_bdt(hba);
+mp_bdt_mem_err:
+       if (hba->regview) {
+               iounmap(hba->regview);
+               hba->regview = NULL;
+       }
+ioreg_map_err:
+       pci_dev_put(hba->pcidev);
+       scsi_host_put(shost);
+       return NULL;
+}
+
+/**
+ * bnx2i_free_hba - releases hba structure and resources held by the adapter
+ * @hba:       pointer to adapter instance
+ *
+ * free adapter structure and call various cleanup routines.
+ */
+void bnx2i_free_hba(struct bnx2i_hba *hba)
+{
+       struct Scsi_Host *shost = hba->shost;
+
+       iscsi_host_remove(shost);
+       INIT_LIST_HEAD(&hba->ep_ofld_list);
+       INIT_LIST_HEAD(&hba->ep_destroy_list);
+       pci_dev_put(hba->pcidev);
+
+       if (hba->regview) {
+               iounmap(hba->regview);
+               hba->regview = NULL;
+       }
+       bnx2i_free_mp_bdt(hba);
+       bnx2i_release_free_cid_que(hba);
+       iscsi_host_free(shost);
+}
+
+/**
+ * bnx2i_conn_free_login_resources - free DMA resources used for login process
+ * @hba:               pointer to adapter instance
+ * @bnx2i_conn:                iscsi connection pointer
+ *
+ * Login-related resources, mostly BDT & payload DMA memory, are freed
+ */
+static void bnx2i_conn_free_login_resources(struct bnx2i_hba *hba,
+                                           struct bnx2i_conn *bnx2i_conn)
+{
+       if (bnx2i_conn->gen_pdu.resp_bd_tbl) {
+               dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                 bnx2i_conn->gen_pdu.resp_bd_tbl,
+                                 bnx2i_conn->gen_pdu.resp_bd_dma);
+               bnx2i_conn->gen_pdu.resp_bd_tbl = NULL;
+       }
+
+       if (bnx2i_conn->gen_pdu.req_bd_tbl) {
+               dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                 bnx2i_conn->gen_pdu.req_bd_tbl,
+                                 bnx2i_conn->gen_pdu.req_bd_dma);
+               bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+       }
+
+       if (bnx2i_conn->gen_pdu.resp_buf) {
+               dma_free_coherent(&hba->pcidev->dev,
+                                 ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                 bnx2i_conn->gen_pdu.resp_buf,
+                                 bnx2i_conn->gen_pdu.resp_dma_addr);
+               bnx2i_conn->gen_pdu.resp_buf = NULL;
+       }
+
+       if (bnx2i_conn->gen_pdu.req_buf) {
+               dma_free_coherent(&hba->pcidev->dev,
+                                 ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                 bnx2i_conn->gen_pdu.req_buf,
+                                 bnx2i_conn->gen_pdu.req_dma_addr);
+               bnx2i_conn->gen_pdu.req_buf = NULL;
+       }
+}
+
+/**
+ * bnx2i_conn_alloc_login_resources - alloc DMA resources for login/nop.
+ * @hba:               pointer to adapter instance
+ * @bnx2i_conn:                iscsi connection pointer
+ *
+ * Mgmt task DMA resources are allocated in this routine.
+ */
+static int bnx2i_conn_alloc_login_resources(struct bnx2i_hba *hba,
+                                           struct bnx2i_conn *bnx2i_conn)
+{
+       /* Allocate memory for login request/response buffers */
+       bnx2i_conn->gen_pdu.req_buf =
+               dma_alloc_coherent(&hba->pcidev->dev,
+                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                  &bnx2i_conn->gen_pdu.req_dma_addr,
+                                  GFP_KERNEL);
+       if (bnx2i_conn->gen_pdu.req_buf == NULL)
+               goto login_req_buf_failure;
+
+       bnx2i_conn->gen_pdu.req_buf_size = 0;
+       bnx2i_conn->gen_pdu.req_wr_ptr = bnx2i_conn->gen_pdu.req_buf;
+
+       bnx2i_conn->gen_pdu.resp_buf =
+               dma_alloc_coherent(&hba->pcidev->dev,
+                                  ISCSI_DEF_MAX_RECV_SEG_LEN,
+                                  &bnx2i_conn->gen_pdu.resp_dma_addr,
+                                  GFP_KERNEL);
+       if (bnx2i_conn->gen_pdu.resp_buf == NULL)
+               goto login_resp_buf_failure;
+
+       bnx2i_conn->gen_pdu.resp_buf_size = ISCSI_DEF_MAX_RECV_SEG_LEN;
+       bnx2i_conn->gen_pdu.resp_wr_ptr = bnx2i_conn->gen_pdu.resp_buf;
+
+       bnx2i_conn->gen_pdu.req_bd_tbl =
+               dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                  &bnx2i_conn->gen_pdu.req_bd_dma, GFP_KERNEL);
+       if (bnx2i_conn->gen_pdu.req_bd_tbl == NULL)
+               goto login_req_bd_tbl_failure;
+
+       bnx2i_conn->gen_pdu.resp_bd_tbl =
+               dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                                  &bnx2i_conn->gen_pdu.resp_bd_dma,
+                                  GFP_KERNEL);
+       if (bnx2i_conn->gen_pdu.resp_bd_tbl == NULL)
+               goto login_resp_bd_tbl_failure;
+
+       return 0;
+
+login_resp_bd_tbl_failure:
+       dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
+                         bnx2i_conn->gen_pdu.req_bd_tbl,
+                         bnx2i_conn->gen_pdu.req_bd_dma);
+       bnx2i_conn->gen_pdu.req_bd_tbl = NULL;
+
+login_req_bd_tbl_failure:
+       dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+                         bnx2i_conn->gen_pdu.resp_buf,
+                         bnx2i_conn->gen_pdu.resp_dma_addr);
+       bnx2i_conn->gen_pdu.resp_buf = NULL;
+login_resp_buf_failure:
+       dma_free_coherent(&hba->pcidev->dev, ISCSI_DEF_MAX_RECV_SEG_LEN,
+                         bnx2i_conn->gen_pdu.req_buf,
+                         bnx2i_conn->gen_pdu.req_dma_addr);
+       bnx2i_conn->gen_pdu.req_buf = NULL;
+login_req_buf_failure:
+       iscsi_conn_printk(KERN_ERR, bnx2i_conn->cls_conn->dd_data,
+                         "login resource alloc failed!!\n");
+       return -ENOMEM;
+
+}
+
+
+/**
+ * bnx2i_iscsi_prep_generic_pdu_bd - prepares BD table.
+ * @bnx2i_conn:                iscsi connection pointer
+ *
+ * Prepares the request/response buffer BD tables before shipping
+ *     requests to cnic for PDUs prepared by the 'iscsid' daemon
+ */
+static void bnx2i_iscsi_prep_generic_pdu_bd(struct bnx2i_conn *bnx2i_conn)
+{
+       struct iscsi_bd *bd_tbl;
+
+       bd_tbl = (struct iscsi_bd *) bnx2i_conn->gen_pdu.req_bd_tbl;
+
+       bd_tbl->buffer_addr_hi =
+               (u32) ((u64) bnx2i_conn->gen_pdu.req_dma_addr >> 32);
+       bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.req_dma_addr;
+       bd_tbl->buffer_length = bnx2i_conn->gen_pdu.req_wr_ptr -
+                               bnx2i_conn->gen_pdu.req_buf;
+       bd_tbl->reserved0 = 0;
+       bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+
+       bd_tbl = (struct iscsi_bd  *) bnx2i_conn->gen_pdu.resp_bd_tbl;
+       bd_tbl->buffer_addr_hi = (u64) bnx2i_conn->gen_pdu.resp_dma_addr >> 32;
+       bd_tbl->buffer_addr_lo = (u32) bnx2i_conn->gen_pdu.resp_dma_addr;
+       bd_tbl->buffer_length = ISCSI_DEF_MAX_RECV_SEG_LEN;
+       bd_tbl->reserved0 = 0;
+       bd_tbl->flags = ISCSI_BD_LAST_IN_BD_CHAIN |
+                       ISCSI_BD_FIRST_IN_BD_CHAIN;
+}
+
+
+/**
+ * bnx2i_iscsi_send_generic_request - called to send mgmt tasks.
+ * @task:      transport layer task pointer
+ *
+ * called to transmit PDUs prepared by the 'iscsid' daemon. iSCSI Login,
+ *     Nop-out, Logout and TMF requests flow through this path.
+ */
+static int bnx2i_iscsi_send_generic_request(struct iscsi_task *task)
+{
+       struct bnx2i_cmd *cmd = task->dd_data;
+       struct bnx2i_conn *bnx2i_conn = cmd->conn;
+       int rc = 0;
+       char *buf;
+       int data_len;
+
+       bnx2i_iscsi_prep_generic_pdu_bd(bnx2i_conn);
+       switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
+       case ISCSI_OP_LOGIN:
+               bnx2i_send_iscsi_login(bnx2i_conn, task);
+               break;
+       case ISCSI_OP_NOOP_OUT:
+               data_len = bnx2i_conn->gen_pdu.req_buf_size;
+               buf = bnx2i_conn->gen_pdu.req_buf;
+               if (data_len)
+                       rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+                                                    RESERVED_ITT,
+                                                    buf, data_len, 1);
+               else
+                       rc = bnx2i_send_iscsi_nopout(bnx2i_conn, task,
+                                                    RESERVED_ITT,
+                                                    NULL, 0, 1);
+               break;
+       case ISCSI_OP_LOGOUT:
+               rc = bnx2i_send_iscsi_logout(bnx2i_conn, task);
+               break;
+       case ISCSI_OP_SCSI_TMFUNC:
+               rc = bnx2i_send_iscsi_tmf(bnx2i_conn, task);
+               break;
+       default:
+               iscsi_conn_printk(KERN_ALERT, bnx2i_conn->cls_conn->dd_data,
+                                 "send_gen: unsupported op 0x%x\n",
+                                 task->hdr->opcode);
+       }
+       return rc;
+}
+
+
+/**********************************************************************
+ *             SCSI-ML Interface
+ **********************************************************************/
+
+/**
+ * bnx2i_cpy_scsi_cdb - copies LUN & CDB fields in required format to sq wqe
+ * @sc:                SCSI-ML command pointer
+ * @cmd:       iscsi cmd pointer
+ */
+static void bnx2i_cpy_scsi_cdb(struct scsi_cmnd *sc, struct bnx2i_cmd *cmd)
+{
+       u32 dword;
+       int lpcnt;
+       u8 *srcp;
+       u32 *dstp;
+       u32 scsi_lun[2];
+
+       int_to_scsilun(sc->device->lun, (struct scsi_lun *) scsi_lun);
+       cmd->req.lun[0] = be32_to_cpu(scsi_lun[0]);
+       cmd->req.lun[1] = be32_to_cpu(scsi_lun[1]);
+
+       lpcnt = cmd->scsi_cmd->cmd_len / sizeof(dword);
+       srcp = (u8 *) sc->cmnd;
+       dstp = (u32 *) cmd->req.cdb;
+       while (lpcnt--) {
+               memcpy(&dword, (const void *) srcp, 4);
+               *dstp = cpu_to_be32(dword);
+               srcp += 4;
+               dstp++;
+       }
+       if (sc->cmd_len & 0x3) {
+               dword = (u32) srcp[0] | ((u32) srcp[1] << 8);
+               *dstp = cpu_to_be32(dword);
+       }
+}
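
/* Worked example for bnx2i_cpy_scsi_cdb() (illustrative): a 6-byte
 * CDB runs the dword loop once (6 / 4 == 1) and then takes the tail
 * branch (6 & 0x3 == 2), which packs the remaining two bytes into one
 * more big-endian dword.  Standard 6-, 10-, 12- and 16-byte CDBs
 * leave tails of 2, 2, 0 and 0 bytes respectively, so only the
 * two-byte tail case arises in practice -- presumably why the branch
 * handles exactly two bytes.
 */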
+
+static void bnx2i_cleanup_task(struct iscsi_task *task)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       struct bnx2i_hba *hba = bnx2i_conn->hba;
+
+       /*
+        * mgmt task or cmd was never sent to us to transmit.
+        */
+       if (!task->sc || task->state == ISCSI_TASK_PENDING)
+               return;
+       /*
+        * need to clean-up task context to claim dma buffers
+        */
+       if (task->state == ISCSI_TASK_ABRT_TMF) {
+               bnx2i_send_cmd_cleanup_req(hba, task->dd_data);
+
+               spin_unlock_bh(&conn->session->lock);
+               wait_for_completion_timeout(&bnx2i_conn->cmd_cleanup_cmpl,
+                               msecs_to_jiffies(ISCSI_CMD_CLEANUP_TIMEOUT));
+               spin_lock_bh(&conn->session->lock);
+       }
+       bnx2i_iscsi_unmap_sg_list(task->dd_data);
+}
+
+/**
+ * bnx2i_mtask_xmit - transmit mtask to chip for further processing
+ * @conn:      transport layer conn structure pointer
+ * @task:      transport layer command structure pointer
+ */
+static int
+bnx2i_mtask_xmit(struct iscsi_conn *conn, struct iscsi_task *task)
+{
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       struct bnx2i_cmd *cmd = task->dd_data;
+
+       memset(bnx2i_conn->gen_pdu.req_buf, 0, ISCSI_DEF_MAX_RECV_SEG_LEN);
+
+       bnx2i_setup_cmd_wqe_template(cmd);
+       bnx2i_conn->gen_pdu.req_buf_size = task->data_count;
+       if (task->data_count) {
+               memcpy(bnx2i_conn->gen_pdu.req_buf, task->data,
+                      task->data_count);
+               bnx2i_conn->gen_pdu.req_wr_ptr =
+                       bnx2i_conn->gen_pdu.req_buf + task->data_count;
+       }
+       cmd->conn = bnx2i_conn;
+       cmd->scsi_cmd = NULL;
+       return bnx2i_iscsi_send_generic_request(task);
+}
+
+/**
+ * bnx2i_task_xmit - transmit iscsi command to chip for further processing
+ * @task:      transport layer command structure pointer
+ *
+ * maps SG buffers and sends the request to the chip/firmware as an SQ WQE
+ */
+static int bnx2i_task_xmit(struct iscsi_task *task)
+{
+       struct iscsi_conn *conn = task->conn;
+       struct iscsi_session *session = conn->session;
+       struct Scsi_Host *shost = iscsi_session_to_shost(session->cls_session);
+       struct bnx2i_hba *hba = iscsi_host_priv(shost);
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       struct scsi_cmnd *sc = task->sc;
+       struct bnx2i_cmd *cmd = task->dd_data;
+       struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
+
+       if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
+               return -ENOTCONN;
+
+       if (!bnx2i_conn->is_bound)
+               return -ENOTCONN;
+
+       /*
+        * If there is no scsi_cmnd this must be a mgmt task
+        */
+       if (!sc)
+               return bnx2i_mtask_xmit(conn, task);
+
+       bnx2i_setup_cmd_wqe_template(cmd);
+       cmd->req.op_code = ISCSI_OP_SCSI_CMD;
+       cmd->conn = bnx2i_conn;
+       cmd->scsi_cmd = sc;
+       cmd->req.total_data_transfer_length = scsi_bufflen(sc);
+       cmd->req.cmd_sn = be32_to_cpu(hdr->cmdsn);
+
+       bnx2i_iscsi_map_sg_list(cmd);
+       bnx2i_cpy_scsi_cdb(sc, cmd);
+
+       cmd->req.op_attr = ISCSI_ATTR_SIMPLE;
+       if (sc->sc_data_direction == DMA_TO_DEVICE) {
+               cmd->req.op_attr |= ISCSI_CMD_REQUEST_WRITE;
+               cmd->req.itt = task->itt |
+                       (ISCSI_TASK_TYPE_WRITE << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+               bnx2i_setup_write_cmd_bd_info(task);
+       } else {
+               if (scsi_bufflen(sc))
+                       cmd->req.op_attr |= ISCSI_CMD_REQUEST_READ;
+               cmd->req.itt = task->itt |
+                       (ISCSI_TASK_TYPE_READ << ISCSI_CMD_REQUEST_TYPE_SHIFT);
+       }
+
+       cmd->req.num_bds = cmd->io_tbl.bd_valid;
+       if (!cmd->io_tbl.bd_valid) {
+               cmd->req.bd_list_addr_lo = (u32) hba->mp_bd_dma;
+               cmd->req.bd_list_addr_hi = (u32) ((u64) hba->mp_bd_dma >> 32);
+               cmd->req.num_bds = 1;
+       }
+
+       bnx2i_send_iscsi_scsicmd(bnx2i_conn, cmd);
+       return 0;
+}
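+
+/* The ITT encoding above folds the firmware task type into the high bits
+ * of the tag, so completions can be demultiplexed without a lookup. For
+ * example, with task->itt = 0x12 and ISCSI_CMD_REQUEST_TYPE_SHIFT = 14
+ * (an assumed value; the real one lives in the 57xx HSI headers), a write
+ * yields:
+ *
+ *     itt = 0x12 | (ISCSI_TASK_TYPE_WRITE << 14);
+ */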
+
+/**
+ * bnx2i_session_create - create a new iscsi session
+ * @ep:                endpoint (transport identifier) handle
+ * @cmds_max:          max commands supported
+ * @qdepth:            scsi queue depth to support
+ * @initial_cmdsn:     initial iscsi CMDSN to be used for this session
+ *
+ * Creates a new iSCSI session instance on given device.
+ */
+static struct iscsi_cls_session *
+bnx2i_session_create(struct iscsi_endpoint *ep,
+                    uint16_t cmds_max, uint16_t qdepth,
+                    uint32_t initial_cmdsn)
+{
+       struct Scsi_Host *shost;
+       struct iscsi_cls_session *cls_session;
+       struct bnx2i_hba *hba;
+       struct bnx2i_endpoint *bnx2i_ep;
+
+       if (!ep) {
+               printk(KERN_ERR "bnx2i: missing ep.\n");
+               return NULL;
+       }
+
+       bnx2i_ep = ep->dd_data;
+       shost = bnx2i_ep->hba->shost;
+       hba = iscsi_host_priv(shost);
+       if (bnx2i_adapter_ready(hba))
+               return NULL;
+
+       /*
+        * user can override hw limit as long as it is within
+        * the min/max.
+        */
+       if (cmds_max > hba->max_sqes)
+               cmds_max = hba->max_sqes;
+       else if (cmds_max < BNX2I_SQ_WQES_MIN)
+               cmds_max = BNX2I_SQ_WQES_MIN;
+
+       cls_session = iscsi_session_setup(&bnx2i_iscsi_transport, shost,
+                                         cmds_max, sizeof(struct bnx2i_cmd),
+                                         initial_cmdsn, ISCSI_MAX_TARGET);
+       if (!cls_session)
+               return NULL;
+
+       if (bnx2i_setup_cmd_pool(hba, cls_session->dd_data))
+               goto session_teardown;
+       return cls_session;
+
+session_teardown:
+       iscsi_session_teardown(cls_session);
+       return NULL;
+}
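+
+/* The cmds_max clamp above in practice (128 is only an illustrative
+ * max_sqes value): a user request of 2048 with hba->max_sqes = 128 is
+ * trimmed to 128, a request below BNX2I_SQ_WQES_MIN is raised to that
+ * minimum, and anything in between is passed to iscsi_session_setup()
+ * unchanged.
+ */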
+
+
+/**
+ * bnx2i_session_destroy - destroys iscsi session
+ * @cls_session:       pointer to iscsi cls session
+ *
+ * Destroys previously created iSCSI session instance and releases
+ *     all resources held by it
+ */
+static void bnx2i_session_destroy(struct iscsi_cls_session *cls_session)
+{
+       struct iscsi_session *session = cls_session->dd_data;
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_hba *hba = iscsi_host_priv(shost);
+
+       bnx2i_destroy_cmd_pool(hba, session);
+       iscsi_session_teardown(cls_session);
+}
+
+
+/**
+ * bnx2i_conn_create - create iscsi connection instance
+ * @cls_session:       pointer to iscsi cls session
+ * @cid:               iscsi cid as per rfc (not NX2's CID terminology)
+ *
+ * Creates a new iSCSI connection instance for a given session
+ */
+static struct iscsi_cls_conn *
+bnx2i_conn_create(struct iscsi_cls_session *cls_session, uint32_t cid)
+{
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_hba *hba = iscsi_host_priv(shost);
+       struct bnx2i_conn *bnx2i_conn;
+       struct iscsi_cls_conn *cls_conn;
+       struct iscsi_conn *conn;
+
+       cls_conn = iscsi_conn_setup(cls_session, sizeof(*bnx2i_conn),
+                                   cid);
+       if (!cls_conn)
+               return NULL;
+       conn = cls_conn->dd_data;
+
+       bnx2i_conn = conn->dd_data;
+       bnx2i_conn->cls_conn = cls_conn;
+       bnx2i_conn->hba = hba;
+       /* 'ep' ptr will be assigned in bind() call */
+       bnx2i_conn->ep = NULL;
+       init_completion(&bnx2i_conn->cmd_cleanup_cmpl);
+
+       if (bnx2i_conn_alloc_login_resources(hba, bnx2i_conn)) {
+               iscsi_conn_printk(KERN_ALERT, conn,
+                                 "conn_new: login resc alloc failed!!\n");
+               goto free_conn;
+       }
+
+       return cls_conn;
+
+free_conn:
+       iscsi_conn_teardown(cls_conn);
+       return NULL;
+}
+
+/**
+ * bnx2i_conn_bind - binds iscsi sess, conn and ep objects together
+ * @cls_session:       pointer to iscsi cls session
+ * @cls_conn:          pointer to iscsi cls conn
+ * @transport_fd:      64-bit EP handle
+ * @is_leading:                leading connection on this session?
+ *
+ * Binds together the iSCSI session instance, the iSCSI connection instance
+ *     and the TCP connection. This routine returns an error code if the
+ *     TCP connection does not belong to the device the iSCSI sess/conn
+ *     is bound to
+ */
+static int bnx2i_conn_bind(struct iscsi_cls_session *cls_session,
+                          struct iscsi_cls_conn *cls_conn,
+                          uint64_t transport_fd, int is_leading)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       struct Scsi_Host *shost = iscsi_session_to_shost(cls_session);
+       struct bnx2i_hba *hba = iscsi_host_priv(shost);
+       struct bnx2i_endpoint *bnx2i_ep;
+       struct iscsi_endpoint *ep;
+       int ret_code;
+
+       ep = iscsi_lookup_endpoint(transport_fd);
+       if (!ep)
+               return -EINVAL;
+
+       bnx2i_ep = ep->dd_data;
+       if ((bnx2i_ep->state == EP_STATE_TCP_FIN_RCVD) ||
+           (bnx2i_ep->state == EP_STATE_TCP_RST_RCVD))
+               /* Peer disconnected via FIN or RST */
+               return -EINVAL;
+
+       if (iscsi_conn_bind(cls_session, cls_conn, is_leading))
+               return -EINVAL;
+
+       if (bnx2i_ep->hba != hba) {
+               /* Error - TCP connection does not belong to this device */
+               iscsi_conn_printk(KERN_ALERT, cls_conn->dd_data,
+                                 "conn bind, ep=0x%p (%s) does not belong "
+                                 "to hba (%s)\n", bnx2i_ep,
+                                 bnx2i_ep->hba->netdev->name,
+                                 hba->netdev->name);
+               return -EEXIST;
+       }
+
+       bnx2i_ep->conn = bnx2i_conn;
+       bnx2i_conn->ep = bnx2i_ep;
+       bnx2i_conn->iscsi_conn_cid = bnx2i_ep->ep_iscsi_cid;
+       bnx2i_conn->fw_cid = bnx2i_ep->ep_cid;
+       bnx2i_conn->is_bound = 1;
+
+       ret_code = bnx2i_bind_conn_to_iscsi_cid(hba, bnx2i_conn,
+                                               bnx2i_ep->ep_iscsi_cid);
+
+       /* 5706/5708/5709 FW treats the RQ as full once initialized; on the
+        * 57710 the driver must explicitly replenish the RQ index during
+        * setup.
+        */
+       if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+               bnx2i_put_rq_buf(bnx2i_conn, 0);
+
+       bnx2i_arm_cq_event_coalescing(bnx2i_conn->ep, CNIC_ARM_CQE);
+       return ret_code;
+}
+
+
+/**
+ * bnx2i_conn_destroy - destroy iscsi connection instance & release resources
+ * @cls_conn:  pointer to iscsi cls conn
+ *
+ * Destroy an iSCSI connection instance and release memory resources held by
+ *     this connection
+ */
+static void bnx2i_conn_destroy(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       struct Scsi_Host *shost;
+       struct bnx2i_hba *hba;
+
+       shost = iscsi_session_to_shost(iscsi_conn_to_session(cls_conn));
+       hba = iscsi_host_priv(shost);
+
+       bnx2i_conn_free_login_resources(hba, bnx2i_conn);
+       iscsi_conn_teardown(cls_conn);
+}
+
+
+/**
+ * bnx2i_conn_get_param - return iscsi connection parameter to caller
+ * @cls_conn:  pointer to iscsi cls conn
+ * @param:     parameter type identifier
+ * @buf:       buffer pointer
+ *
+ * returns iSCSI connection parameters
+ */
+static int bnx2i_conn_get_param(struct iscsi_cls_conn *cls_conn,
+                               enum iscsi_param param, char *buf)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+       int len = 0;
+
+       switch (param) {
+       case ISCSI_PARAM_CONN_PORT:
+               if (bnx2i_conn->ep)
+                       len = sprintf(buf, "%hu\n",
+                                     bnx2i_conn->ep->cm_sk->dst_port);
+               break;
+       case ISCSI_PARAM_CONN_ADDRESS:
+               if (bnx2i_conn->ep)
+                       len = sprintf(buf, NIPQUAD_FMT "\n",
+                                     NIPQUAD(bnx2i_conn->ep->cm_sk->dst_ip));
+               break;
+       default:
+               return iscsi_conn_get_param(cls_conn, param, buf);
+       }
+
+       return len;
+}
+
+/**
+ * bnx2i_host_get_param - returns host (adapter) related parameters
+ * @shost:     scsi host pointer
+ * @param:     parameter type identifier
+ * @buf:       buffer pointer
+ */
+static int bnx2i_host_get_param(struct Scsi_Host *shost,
+                               enum iscsi_host_param param, char *buf)
+{
+       struct bnx2i_hba *hba = iscsi_host_priv(shost);
+       int len = 0;
+
+       switch (param) {
+       case ISCSI_HOST_PARAM_HWADDRESS:
+               len = sysfs_format_mac(buf, hba->cnic->mac_addr, 6);
+               break;
+       case ISCSI_HOST_PARAM_NETDEV_NAME:
+               len = sprintf(buf, "%s\n", hba->netdev->name);
+               break;
+       default:
+               return iscsi_host_get_param(shost, param, buf);
+       }
+       return len;
+}
+
+/**
+ * bnx2i_conn_start - completes iscsi connection migration to FFP
+ * @cls_conn:  pointer to iscsi cls conn
+ *
+ * last call in FFP migration to hand over the iscsi conn to the driver
+ */
+static int bnx2i_conn_start(struct iscsi_cls_conn *cls_conn)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+       struct bnx2i_conn *bnx2i_conn = conn->dd_data;
+
+       bnx2i_conn->ep->state = EP_STATE_ULP_UPDATE_START;
+       bnx2i_update_iscsi_conn(conn);
+
+       /*
+        * this should normally not sleep for a long time so it should
+        * not disrupt the caller.
+        */
+       bnx2i_conn->ep->ofld_timer.expires = 1 * HZ + jiffies;
+       bnx2i_conn->ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       bnx2i_conn->ep->ofld_timer.data = (unsigned long) bnx2i_conn->ep;
+       add_timer(&bnx2i_conn->ep->ofld_timer);
+       /* update iSCSI context for this conn, wait for CNIC to complete */
+       wait_event_interruptible(bnx2i_conn->ep->ofld_wait,
+                       bnx2i_conn->ep->state != EP_STATE_ULP_UPDATE_START);
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&bnx2i_conn->ep->ofld_timer);
+
+       iscsi_conn_start(cls_conn);
+       return 0;
+}
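+
+/* The ofld_timer + wait_event_interruptible() pairing above is the
+ * driver's recurring pattern for bounding a CNIC completion wait: the
+ * timer callback (bnx2i_ep_ofld_timer) is expected to push the ep state
+ * out of its *_START value and wake ofld_wait, so the wait terminates
+ * even if the CNIC never answers. A minimal sketch of the shape, with a
+ * placeholder state name:
+ *
+ *     ep->state = EP_STATE_FOO_START;
+ *     setup_timer(&ep->ofld_timer, bnx2i_ep_ofld_timer, (unsigned long)ep);
+ *     mod_timer(&ep->ofld_timer, jiffies + timeout);
+ *     wait_event_interruptible(ep->ofld_wait,
+ *                              ep->state != EP_STATE_FOO_START);
+ *     del_timer_sync(&ep->ofld_timer);
+ */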
+
+
+/**
+ * bnx2i_conn_get_stats - returns iSCSI stats
+ * @cls_conn:  pointer to iscsi cls conn
+ * @stats:     pointer to iscsi statistic struct
+ */
+static void bnx2i_conn_get_stats(struct iscsi_cls_conn *cls_conn,
+                                struct iscsi_stats *stats)
+{
+       struct iscsi_conn *conn = cls_conn->dd_data;
+
+       stats->txdata_octets = conn->txdata_octets;
+       stats->rxdata_octets = conn->rxdata_octets;
+       stats->scsicmd_pdus = conn->scsicmd_pdus_cnt;
+       stats->dataout_pdus = conn->dataout_pdus_cnt;
+       stats->scsirsp_pdus = conn->scsirsp_pdus_cnt;
+       stats->datain_pdus = conn->datain_pdus_cnt;
+       stats->r2t_pdus = conn->r2t_pdus_cnt;
+       stats->tmfcmd_pdus = conn->tmfcmd_pdus_cnt;
+       stats->tmfrsp_pdus = conn->tmfrsp_pdus_cnt;
+       stats->digest_err = 0;
+       stats->timeout_err = 0;
+       strcpy(stats->custom[0].desc, "eh_abort_cnt");
+       stats->custom[0].value = conn->eh_abort_cnt;
+       stats->custom_length = 1;
+}
+
+
+/**
+ * bnx2i_check_route - checks if the target IP route goes through an NX2 device
+ * @dst_addr:  target IP address
+ *
+ * checks whether the route resolves to a BNX2 device
+ */
+static struct bnx2i_hba *bnx2i_check_route(struct sockaddr *dst_addr)
+{
+       struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+       struct bnx2i_hba *hba;
+       struct cnic_dev *cnic = NULL;
+
+       bnx2i_reg_dev_all();
+
+       hba = get_adapter_list_head();
+       if (hba && hba->cnic)
+               cnic = hba->cnic->cm_select_dev(desti, CNIC_ULP_ISCSI);
+       if (!cnic) {
+               printk(KERN_ALERT "bnx2i: no route,"
+                      "can't connect using cnic\n");
+               goto no_nx2_route;
+       }
+       hba = bnx2i_find_hba_for_cnic(cnic);
+       if (!hba)
+               goto no_nx2_route;
+
+       if (bnx2i_adapter_ready(hba)) {
+               printk(KERN_ALERT "bnx2i: check route, hba not found\n");
+               goto no_nx2_route;
+       }
+       if (hba->netdev->mtu > hba->mtu_supported) {
+               printk(KERN_ALERT "bnx2i: %s network i/f mtu is set to %d\n",
+                                 hba->netdev->name, hba->netdev->mtu);
+               printk(KERN_ALERT "bnx2i: iSCSI HBA can support mtu of %d\n",
+                                 hba->mtu_supported);
+               goto no_nx2_route;
+       }
+       return hba;
+no_nx2_route:
+       return NULL;
+}
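+
+/* Example of the MTU guard above, with illustrative numbers: if the NX2
+ * netdev is configured with mtu 9000 but the iSCSI firmware reports
+ * hba->mtu_supported = 1500, the route check fails and the connect
+ * attempt is refused rather than establishing a session that would drop
+ * jumbo frames.
+ */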
+
+
+/**
+ * bnx2i_tear_down_conn - tear down iscsi/tcp connection and free resources
+ * @hba:       pointer to adapter instance
+ * @ep:                endpoint (transport identifier) structure
+ *
+ * destroys cm_sock structure and on chip iscsi context
+ */
+static int bnx2i_tear_down_conn(struct bnx2i_hba *hba,
+                                struct bnx2i_endpoint *ep)
+{
+       if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic))
+               hba->cnic->cm_destroy(ep->cm_sk);
+
+       if (test_bit(ADAPTER_STATE_GOING_DOWN, &ep->hba->adapter_state))
+               ep->state = EP_STATE_DISCONN_COMPL;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type) &&
+           ep->state == EP_STATE_DISCONN_TIMEDOUT) {
+               printk(KERN_ALERT "bnx2i - ERROR - please submit GRC Dump,"
+                                 " NW/PCIe trace, driver msgs to developers"
+                                 " for analysis\n");
+               return 1;
+       }
+
+       ep->state = EP_STATE_CLEANUP_START;
+       init_timer(&ep->ofld_timer);
+       ep->ofld_timer.expires = 10*HZ + jiffies;
+       ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       ep->ofld_timer.data = (unsigned long) ep;
+       add_timer(&ep->ofld_timer);
+
+       bnx2i_ep_destroy_list_add(hba, ep);
+
+       /* destroy iSCSI context, wait for it to complete */
+       bnx2i_send_conn_destroy(hba, ep);
+       wait_event_interruptible(ep->ofld_wait,
+                                (ep->state != EP_STATE_CLEANUP_START));
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&ep->ofld_timer);
+
+       bnx2i_ep_destroy_list_del(hba, ep);
+
+       if (ep->state != EP_STATE_CLEANUP_CMPL)
+               /* should never happen */
+               printk(KERN_ALERT "bnx2i - conn destroy failed\n");
+
+       return 0;
+}
+
+
+/**
+ * bnx2i_ep_connect - establish TCP connection to target portal
+ * @shost:             scsi host
+ * @dst_addr:          target IP address
+ * @non_blocking:      blocking or non-blocking call
+ *
+ * This routine initiates the TCP/IP connection by invoking the Option-2
+ *     interface with l5_core and the CNIC. This is a multi-step process:
+ *     resolving a route to the target, creating an iSCSI connection
+ *     context, handshaking with the CNIC module to create/initialize the
+ *     socket struct, and finally sending down the option-2 request to
+ *     complete the TCP 3-way handshake
+ */
+static struct iscsi_endpoint *bnx2i_ep_connect(struct Scsi_Host *shost,
+                                              struct sockaddr *dst_addr,
+                                              int non_blocking)
+{
+       u32 iscsi_cid = BNX2I_CID_RESERVED;
+       struct sockaddr_in *desti = (struct sockaddr_in *) dst_addr;
+       struct sockaddr_in6 *desti6;
+       struct bnx2i_endpoint *bnx2i_ep;
+       struct bnx2i_hba *hba;
+       struct cnic_dev *cnic;
+       struct cnic_sockaddr saddr;
+       struct iscsi_endpoint *ep;
+       int rc = 0;
+
+       if (shost)
+               /* driver is given scsi host to work with */
+               hba = iscsi_host_priv(shost);
+       else
+               /*
+                * check if the given destination can be reached through
+                * an iSCSI-capable NetXtreme II device
+                */
+               hba = bnx2i_check_route(dst_addr);
+       if (!hba) {
+               rc = -ENOMEM;
+               goto check_busy;
+       }
+
+       cnic = hba->cnic;
+       ep = bnx2i_alloc_ep(hba);
+       if (!ep) {
+               rc = -ENOMEM;
+               goto check_busy;
+       }
+       bnx2i_ep = ep->dd_data;
+
+       mutex_lock(&hba->net_dev_lock);
+       if (bnx2i_adapter_ready(hba)) {
+               rc = -EPERM;
+               goto net_if_down;
+       }
+
+       bnx2i_ep->state = EP_STATE_IDLE;
+       bnx2i_ep->ep_iscsi_cid = (u16) -1;
+       bnx2i_ep->num_active_cmds = 0;
+       iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
+       if (iscsi_cid == -1) {
+               printk(KERN_ALERT "alloc_ep: unable to allocate iscsi cid\n");
+               rc = -ENOMEM;
+               goto iscsi_cid_err;
+       }
+       bnx2i_ep->hba_age = hba->age;
+
+       rc = bnx2i_alloc_qp_resc(hba, bnx2i_ep);
+       if (rc != 0) {
+               printk(KERN_ALERT "bnx2i: ep_conn, alloc QP resc error\n");
+               rc = -ENOMEM;
+               goto qp_resc_err;
+       }
+
+       bnx2i_ep->ep_iscsi_cid = (u16)iscsi_cid;
+       bnx2i_ep->state = EP_STATE_OFLD_START;
+       bnx2i_ep_ofld_list_add(hba, bnx2i_ep);
+
+       init_timer(&bnx2i_ep->ofld_timer);
+       bnx2i_ep->ofld_timer.expires = 2 * HZ + jiffies;
+       bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+       add_timer(&bnx2i_ep->ofld_timer);
+
+       bnx2i_send_conn_ofld_req(hba, bnx2i_ep);
+
+       /* Wait for CNIC hardware to setup conn context and return 'cid' */
+       wait_event_interruptible(bnx2i_ep->ofld_wait,
+                                bnx2i_ep->state != EP_STATE_OFLD_START);
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&bnx2i_ep->ofld_timer);
+
+       bnx2i_ep_ofld_list_del(hba, bnx2i_ep);
+
+       if (bnx2i_ep->state != EP_STATE_OFLD_COMPL) {
+               rc = -ENOSPC;
+               goto conn_failed;
+       }
+
+       rc = cnic->cm_create(cnic, CNIC_ULP_ISCSI, bnx2i_ep->ep_cid,
+                            iscsi_cid, &bnx2i_ep->cm_sk, bnx2i_ep);
+       if (rc) {
+               rc = -EINVAL;
+               goto conn_failed;
+       }
+
+       bnx2i_ep->cm_sk->rcv_buf = 256 * 1024;
+       bnx2i_ep->cm_sk->snd_buf = 256 * 1024;
+       clear_bit(SK_TCP_TIMESTAMP, &bnx2i_ep->cm_sk->tcp_flags);
+
+       memset(&saddr, 0, sizeof(saddr));
+       if (dst_addr->sa_family == AF_INET) {
+               desti = (struct sockaddr_in *) dst_addr;
+               saddr.remote.v4 = *desti;
+               saddr.local.v4.sin_family = desti->sin_family;
+       } else if (dst_addr->sa_family == AF_INET6) {
+               desti6 = (struct sockaddr_in6 *) dst_addr;
+               saddr.remote.v6 = *desti6;
+               saddr.local.v6.sin6_family = desti6->sin6_family;
+       }
+
+       bnx2i_ep->timestamp = jiffies;
+       bnx2i_ep->state = EP_STATE_CONNECT_START;
+       if (!test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               rc = -EINVAL;
+               goto conn_failed;
+       } else
+               rc = cnic->cm_connect(bnx2i_ep->cm_sk, &saddr);
+
+       if (rc)
+               goto release_ep;
+
+       if (bnx2i_map_ep_dbell_regs(bnx2i_ep))
+               goto release_ep;
+       mutex_unlock(&hba->net_dev_lock);
+       return ep;
+
+release_ep:
+       if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+               mutex_unlock(&hba->net_dev_lock);
+               return ERR_PTR(rc);
+       }
+conn_failed:
+net_if_down:
+iscsi_cid_err:
+       bnx2i_free_qp_resc(hba, bnx2i_ep);
+qp_resc_err:
+       bnx2i_free_ep(ep);
+       mutex_unlock(&hba->net_dev_lock);
+check_busy:
+       bnx2i_unreg_dev_all();
+       return ERR_PTR(rc);
+}
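+
+/* Error unwinding in bnx2i_ep_connect() above is the usual kernel goto
+ * ladder, each label releasing exactly what was acquired before the
+ * failure point:
+ *
+ *     conn_failed/net_if_down/iscsi_cid_err  ->  free QP resources
+ *     qp_resc_err                            ->  free the endpoint
+ *     check_busy                             ->  drop cnic registration
+ *
+ * release_ep additionally tears down the half-created on-chip context via
+ * bnx2i_tear_down_conn() before falling through the same ladder.
+ */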
+
+
+/**
+ * bnx2i_ep_poll - polls for TCP connection establishment
+ * @ep:                        TCP connection (endpoint) handle
+ * @timeout_ms:                timeout value in milliseconds
+ *
+ * polls for TCP connect request to complete
+ */
+static int bnx2i_ep_poll(struct iscsi_endpoint *ep, int timeout_ms)
+{
+       struct bnx2i_endpoint *bnx2i_ep;
+       int rc = 0;
+
+       bnx2i_ep = ep->dd_data;
+       if ((bnx2i_ep->state == EP_STATE_IDLE) ||
+           (bnx2i_ep->state == EP_STATE_CONNECT_FAILED) ||
+           (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+               return -1;
+       if (bnx2i_ep->state == EP_STATE_CONNECT_COMPL)
+               return 1;
+
+       rc = wait_event_interruptible_timeout(bnx2i_ep->ofld_wait,
+                                             ((bnx2i_ep->state ==
+                                               EP_STATE_OFLD_FAILED) ||
+                                              (bnx2i_ep->state ==
+                                               EP_STATE_CONNECT_FAILED) ||
+                                              (bnx2i_ep->state ==
+                                               EP_STATE_CONNECT_COMPL)),
+                                             msecs_to_jiffies(timeout_ms));
+       if (!rc || (bnx2i_ep->state == EP_STATE_OFLD_FAILED))
+               return -1;      /* timed out or offload failed */
+
+       return (rc > 0) ? 1 : rc;
+}
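+
+/* Return-value contract of bnx2i_ep_poll() as the iscsi transport
+ * consumes it: 1 = TCP connection established, 0 = still in progress
+ * (poll again), negative = give up. A hedged sketch of a caller loop
+ * (iscsid actually drives this through the iscsi netlink interface;
+ * the direct calls here are illustrative only):
+ *
+ *     int rc;
+ *     do {
+ *             rc = bnx2i_ep_poll(ep, 1000);
+ *     } while (rc == 0);
+ *     if (rc < 0)
+ *             bnx2i_ep_disconnect(ep);
+ */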
+
+
+/**
+ * bnx2i_ep_tcp_conn_active - check EP state transition
+ * @bnx2i_ep:  endpoint pointer
+ *
+ * check if the underlying TCP connection is active
+ */
+static int bnx2i_ep_tcp_conn_active(struct bnx2i_endpoint *bnx2i_ep)
+{
+       int ret;
+       int cnic_dev_10g = 0;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &bnx2i_ep->hba->cnic_dev_type))
+               cnic_dev_10g = 1;
+
+       switch (bnx2i_ep->state) {
+       case EP_STATE_CONNECT_START:
+       case EP_STATE_CLEANUP_FAILED:
+       case EP_STATE_OFLD_FAILED:
+       case EP_STATE_DISCONN_TIMEDOUT:
+               ret = 0;
+               break;
+       case EP_STATE_CONNECT_COMPL:
+       case EP_STATE_ULP_UPDATE_START:
+       case EP_STATE_ULP_UPDATE_COMPL:
+       case EP_STATE_TCP_FIN_RCVD:
+       case EP_STATE_ULP_UPDATE_FAILED:
+               ret = 1;
+               break;
+       case EP_STATE_TCP_RST_RCVD:
+               ret = 0;
+               break;
+       case EP_STATE_CONNECT_FAILED:
+               if (cnic_dev_10g)
+                       ret = 1;
+               else
+                       ret = 0;
+               break;
+       default:
+               ret = 0;
+       }
+
+       return ret;
+}
+
+
+/**
+ * bnx2i_ep_disconnect - executes TCP connection teardown process
+ * @ep:                TCP connection (endpoint) handle
+ *
+ * executes the TCP connection teardown process
+ */
+static void bnx2i_ep_disconnect(struct iscsi_endpoint *ep)
+{
+       struct bnx2i_endpoint *bnx2i_ep;
+       struct bnx2i_conn *bnx2i_conn = NULL;
+       struct iscsi_session *session = NULL;
+       struct iscsi_conn *conn;
+       struct cnic_dev *cnic;
+       struct bnx2i_hba *hba;
+
+       bnx2i_ep = ep->dd_data;
+
+       /* driver should not attempt connection cleanup until TCP_CONNECT
+        * completes, either successfully or with a failure. The connect
+        * timeout is about 9 secs, so wait up to 12 secs for it to complete
+        */
+       while ((bnx2i_ep->state == EP_STATE_CONNECT_START) &&
+               !time_after(jiffies, bnx2i_ep->timestamp + (12 * HZ)))
+               msleep(250);
+
+       if (bnx2i_ep->conn) {
+               bnx2i_conn = bnx2i_ep->conn;
+               conn = bnx2i_conn->cls_conn->dd_data;
+               session = conn->session;
+
+               spin_lock_bh(&session->lock);
+               bnx2i_conn->is_bound = 0;
+               spin_unlock_bh(&session->lock);
+       }
+
+       hba = bnx2i_ep->hba;
+       if (bnx2i_ep->state == EP_STATE_IDLE)
+               goto return_bnx2i_ep;
+       cnic = hba->cnic;
+
+       mutex_lock(&hba->net_dev_lock);
+
+       if (!test_bit(ADAPTER_STATE_UP, &hba->adapter_state))
+               goto free_resc;
+       if (bnx2i_ep->hba_age != hba->age)
+               goto free_resc;
+
+       if (!bnx2i_ep_tcp_conn_active(bnx2i_ep))
+               goto destroy_conn;
+
+       bnx2i_ep->state = EP_STATE_DISCONN_START;
+
+       init_timer(&bnx2i_ep->ofld_timer);
+       bnx2i_ep->ofld_timer.expires = 10*HZ + jiffies;
+       bnx2i_ep->ofld_timer.function = bnx2i_ep_ofld_timer;
+       bnx2i_ep->ofld_timer.data = (unsigned long) bnx2i_ep;
+       add_timer(&bnx2i_ep->ofld_timer);
+
+       if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
+               int close = 0;
+
+               if (session) {
+                       spin_lock_bh(&session->lock);
+                       if (session->state == ISCSI_STATE_LOGGING_OUT)
+                               close = 1;
+                       spin_unlock_bh(&session->lock);
+               }
+               if (close)
+                       cnic->cm_close(bnx2i_ep->cm_sk);
+               else
+                       cnic->cm_abort(bnx2i_ep->cm_sk);
+       } else
+               goto free_resc;
+
+       /* wait for option-2 conn teardown */
+       wait_event_interruptible(bnx2i_ep->ofld_wait,
+                                bnx2i_ep->state != EP_STATE_DISCONN_START);
+
+       if (signal_pending(current))
+               flush_signals(current);
+       del_timer_sync(&bnx2i_ep->ofld_timer);
+
+destroy_conn:
+       if (bnx2i_tear_down_conn(hba, bnx2i_ep)) {
+               mutex_unlock(&hba->net_dev_lock);
+               return;
+       }
+free_resc:
+       mutex_unlock(&hba->net_dev_lock);
+       bnx2i_free_qp_resc(hba, bnx2i_ep);
+return_bnx2i_ep:
+       if (bnx2i_conn)
+               bnx2i_conn->ep = NULL;
+
+       bnx2i_free_ep(ep);
+
+       if (!hba->ofld_conns_active)
+               bnx2i_unreg_dev_all();
+}
+
+
+/**
+ * bnx2i_nl_set_path - ISCSI_UEVENT_PATH_UPDATE user message handler
+ * @shost:     scsi host pointer
+ * @params:    pointer to buffer containing the iscsi path message
+ */
+static int bnx2i_nl_set_path(struct Scsi_Host *shost, struct iscsi_path *params)
+{
+       struct bnx2i_hba *hba = iscsi_host_priv(shost);
+       char *buf = (char *) params;
+       u16 len = sizeof(*params);
+
+       /* handled by cnic driver */
+       hba->cnic->iscsi_nl_msg_recv(hba->cnic, ISCSI_UEVENT_PATH_UPDATE, buf,
+                                    len);
+
+       return 0;
+}
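+
+/* Flow sketch for the handler above (hedged; the event originates in
+ * userspace open-iscsi): iscsid resolves the outgoing interface and
+ * next-hop for the target, then sends ISCSI_UEVENT_PATH_UPDATE down the
+ * iscsi netlink socket; the transport core invokes .set_path, and bnx2i
+ * simply relays the struct iscsi_path payload (MAC, MTU, VLAN, addresses)
+ * to the cnic driver, which programs the offloaded path.
+ */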
+
+
+/*
+ * 'scsi_host_template' structure and 'iscsi_transport' structure template
+ * used while registering with the SCSI midlayer and iSCSI transport module.
+ */
+static struct scsi_host_template bnx2i_host_template = {
+       .module                 = THIS_MODULE,
+       .name                   = "Broadcom Offload iSCSI Initiator",
+       .proc_name              = "bnx2i",
+       .queuecommand           = iscsi_queuecommand,
+       .eh_abort_handler       = iscsi_eh_abort,
+       .eh_device_reset_handler = iscsi_eh_device_reset,
+       .eh_target_reset_handler = iscsi_eh_target_reset,
+       .can_queue              = 1024,
+       .max_sectors            = 127,
+       .cmd_per_lun            = 32,
+       .this_id                = -1,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .sg_tablesize           = ISCSI_MAX_BDS_PER_CMD,
+       .shost_attrs            = bnx2i_dev_attributes,
+};
+
+struct iscsi_transport bnx2i_iscsi_transport = {
+       .owner                  = THIS_MODULE,
+       .name                   = "bnx2i",
+       .caps                   = CAP_RECOVERY_L0 | CAP_HDRDGST |
+                                 CAP_MULTI_R2T | CAP_DATADGST |
+                                 CAP_DATA_PATH_OFFLOAD,
+       .param_mask             = ISCSI_MAX_RECV_DLENGTH |
+                                 ISCSI_MAX_XMIT_DLENGTH |
+                                 ISCSI_HDRDGST_EN |
+                                 ISCSI_DATADGST_EN |
+                                 ISCSI_INITIAL_R2T_EN |
+                                 ISCSI_MAX_R2T |
+                                 ISCSI_IMM_DATA_EN |
+                                 ISCSI_FIRST_BURST |
+                                 ISCSI_MAX_BURST |
+                                 ISCSI_PDU_INORDER_EN |
+                                 ISCSI_DATASEQ_INORDER_EN |
+                                 ISCSI_ERL |
+                                 ISCSI_CONN_PORT |
+                                 ISCSI_CONN_ADDRESS |
+                                 ISCSI_EXP_STATSN |
+                                 ISCSI_PERSISTENT_PORT |
+                                 ISCSI_PERSISTENT_ADDRESS |
+                                 ISCSI_TARGET_NAME | ISCSI_TPGT |
+                                 ISCSI_USERNAME | ISCSI_PASSWORD |
+                                 ISCSI_USERNAME_IN | ISCSI_PASSWORD_IN |
+                                 ISCSI_FAST_ABORT | ISCSI_ABORT_TMO |
+                                 ISCSI_LU_RESET_TMO |
+                                 ISCSI_PING_TMO | ISCSI_RECV_TMO |
+                                 ISCSI_IFACE_NAME | ISCSI_INITIATOR_NAME,
+       .host_param_mask        = ISCSI_HOST_HWADDRESS | ISCSI_HOST_NETDEV_NAME,
+       .create_session         = bnx2i_session_create,
+       .destroy_session        = bnx2i_session_destroy,
+       .create_conn            = bnx2i_conn_create,
+       .bind_conn              = bnx2i_conn_bind,
+       .destroy_conn           = bnx2i_conn_destroy,
+       .set_param              = iscsi_set_param,
+       .get_conn_param         = bnx2i_conn_get_param,
+       .get_session_param      = iscsi_session_get_param,
+       .get_host_param         = bnx2i_host_get_param,
+       .start_conn             = bnx2i_conn_start,
+       .stop_conn              = iscsi_conn_stop,
+       .send_pdu               = iscsi_conn_send_pdu,
+       .xmit_task              = bnx2i_task_xmit,
+       .get_stats              = bnx2i_conn_get_stats,
+       /* TCP connect - disconnect - option-2 interface calls */
+       .ep_connect             = bnx2i_ep_connect,
+       .ep_poll                = bnx2i_ep_poll,
+       .ep_disconnect          = bnx2i_ep_disconnect,
+       .set_path               = bnx2i_nl_set_path,
+       /* Error recovery timeout call */
+       .session_recovery_timedout = iscsi_session_recovery_timedout,
+       .cleanup_task           = bnx2i_cleanup_task,
+};
diff --git a/drivers/scsi/bnx2i/bnx2i_sysfs.c b/drivers/scsi/bnx2i/bnx2i_sysfs.c
new file mode 100644 (file)
index 0000000..96426b7
--- /dev/null
@@ -0,0 +1,142 @@
+/* bnx2i_sysfs.c: Broadcom NetXtreme II iSCSI driver.
+ *
+ * Copyright (c) 2004 - 2009 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation.
+ *
+ * Written by: Anil Veerabhadrappa (anilgv@broadcom.com)
+ */
+
+#include "bnx2i.h"
+
+/**
+ * bnx2i_dev_to_hba - maps dev pointer to adapter struct
+ * @dev:       device pointer
+ *
+ * Map device to hba structure
+ */
+static inline struct bnx2i_hba *bnx2i_dev_to_hba(struct device *dev)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       return iscsi_host_priv(shost);
+}
+
+
+/**
+ * bnx2i_show_sq_info - returns the currently configured send queue (SQ) size
+ * @dev:       device pointer
+ * @buf:       buffer to return current SQ size parameter
+ *
+ * Returns the current SQ size parameter; this parameter determines the
+ * number of outstanding iSCSI commands supported on a connection
+ */
+static ssize_t bnx2i_show_sq_info(struct device *dev,
+                                 struct device_attribute *attr, char *buf)
+{
+       struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+       return sprintf(buf, "0x%x\n", hba->max_sqes);
+}
+
+
+/**
+ * bnx2i_set_sq_info - update send queue (SQ) size parameter
+ * @dev:       device pointer
+ * @buf:       buffer containing the new SQ size value
+ * @count:     size of the parameter buffer
+ *
+ * Interface for the user to change the send queue size allocated for each
+ * conn. The value must be within the SQ limits and a power of 2; the latter
+ * is needed because of how libiscsi preallocates tasks.
+ */
+static ssize_t bnx2i_set_sq_info(struct device *dev,
+                                struct device_attribute *attr,
+                                const char *buf, size_t count)
+{
+       struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+       u32 val;
+       int max_sq_size;
+
+       if (hba->ofld_conns_active)
+               goto skip_config;
+
+       if (test_bit(BNX2I_NX2_DEV_57710, &hba->cnic_dev_type))
+               max_sq_size = BNX2I_5770X_SQ_WQES_MAX;
+       else
+               max_sq_size = BNX2I_570X_SQ_WQES_MAX;
+
+       if (sscanf(buf, " 0x%x ", &val) > 0) {
+               if ((val >= BNX2I_SQ_WQES_MIN) && (val <= max_sq_size) &&
+                   (is_power_of_2(val)))
+                       hba->max_sqes = val;
+       }
+
+       return count;
+
+skip_config:
+       printk(KERN_ERR "bnx2i: device busy, cannot change SQ size\n");
+       return 0;
+}
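+
+/* Usage sketch for the store hook above (the sysfs path is illustrative;
+ * the attribute is published as "sq_size" on the scsi_host device):
+ *
+ *     echo "0x80" > /sys/class/scsi_host/host7/sq_size
+ *
+ * 0x80 (128) is a power of 2 and, assuming it sits within the device
+ * min/max, replaces hba->max_sqes; a value like 0x90 fails the
+ * is_power_of_2() test and is silently ignored, although the write
+ * still returns success (count).
+ */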
+
+
+/**
+ * bnx2i_show_ccell_info - returns command cell (HQ) size
+ * @dev:       device pointer
+ * @buf:       buffer to return the current CCELL (HQ) size parameter
+ *
+ * returns per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_show_ccell_info(struct device *dev,
+                                    struct device_attribute *attr, char *buf)
+{
+       struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+       return sprintf(buf, "0x%x\n", hba->num_ccell);
+}
+
+
+/**
+ * bnx2i_set_ccell_info - set command cell (HQ) size
+ * @dev:       device pointer
+ * @buf:       buffer containing the new CCELL size value
+ * @count:     parameter buffer size
+ *
+ * updates per-connection TCP history queue size parameter
+ */
+static ssize_t bnx2i_set_ccell_info(struct device *dev,
+                                   struct device_attribute *attr,
+                                   const char *buf, size_t count)
+{
+       u32 val;
+       struct bnx2i_hba *hba = bnx2i_dev_to_hba(dev);
+
+       if (hba->ofld_conns_active)
+               goto skip_config;
+
+       if (sscanf(buf, " 0x%x ", &val) > 0) {
+               if ((val >= BNX2I_CCELLS_MIN) &&
+                   (val <= BNX2I_CCELLS_MAX)) {
+                       hba->num_ccell = val;
+               }
+       }
+
+       return count;
+
+skip_config:
+       printk(KERN_ERR "bnx2i: device busy, cannot change CCELL size\n");
+       return 0;
+}
+
+
+static DEVICE_ATTR(sq_size, S_IRUGO | S_IWUSR,
+                  bnx2i_show_sq_info, bnx2i_set_sq_info);
+static DEVICE_ATTR(num_ccell, S_IRUGO | S_IWUSR,
+                  bnx2i_show_ccell_info, bnx2i_set_ccell_info);
+
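+/* DEVICE_ATTR(sq_size, ...) above expands to a struct device_attribute
+ * named dev_attr_sq_size bound to the show/store handlers; the
+ * NULL-terminated array below is what bnx2i_host_template exposes to
+ * sysfs through its .shost_attrs field.
+ */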
+struct device_attribute *bnx2i_dev_attributes[] = {
+       &dev_attr_sq_size,
+       &dev_attr_num_ccell,
+       NULL
+};
index 59b0958d2d116d0a85b2bbd4c045535a8028fdb5..e3133b58e5944d5c2c36a2bda0d2ed6a161076a5 100644 (file)
@@ -144,7 +144,6 @@ struct cxgb3i_adapter *cxgb3i_adapter_find_by_tdev(struct t3cdev *);
 void cxgb3i_adapter_open(struct t3cdev *);
 void cxgb3i_adapter_close(struct t3cdev *);
 
-struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *);
 struct cxgb3i_hba *cxgb3i_hba_host_add(struct cxgb3i_adapter *,
                                       struct net_device *);
 void cxgb3i_hba_host_remove(struct cxgb3i_hba *);
index 9212400b9b13ecde501a5fe75831855d742fe86e..74369a3f963b66bdbdbaa0b71bdd6f848a074048 100644 (file)
@@ -13,6 +13,7 @@
 
 #include <linux/inet.h>
 #include <linux/crypto.h>
+#include <net/dst.h>
 #include <net/tcp.h>
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -178,7 +179,7 @@ void cxgb3i_adapter_close(struct t3cdev *t3dev)
  * cxgb3i_hba_find_by_netdev - find the cxgb3i_hba structure via net_device
  * @t3dev: t3cdev adapter
  */
-struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
+static struct cxgb3i_hba *cxgb3i_hba_find_by_netdev(struct net_device *ndev)
 {
        struct cxgb3i_adapter *snic;
        int i;
@@ -261,20 +262,27 @@ void cxgb3i_hba_host_remove(struct cxgb3i_hba *hba)
 
 /**
  * cxgb3i_ep_connect - establish TCP connection to target portal
+ * @shost:             scsi host to use
  * @dst_addr:          target IP address
  * @non_blocking:      blocking or non-blocking call
  *
  * Initiates a TCP/IP connection to the dst_addr
  */
-static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
+static struct iscsi_endpoint *cxgb3i_ep_connect(struct Scsi_Host *shost,
+                                               struct sockaddr *dst_addr,
                                                int non_blocking)
 {
        struct iscsi_endpoint *ep;
        struct cxgb3i_endpoint *cep;
-       struct cxgb3i_hba *hba;
+       struct cxgb3i_hba *hba = NULL;
        struct s3_conn *c3cn = NULL;
        int err = 0;
 
+       if (shost)
+               hba = iscsi_host_priv(shost);
+
+       cxgb3i_api_debug("shost 0x%p, hba 0x%p.\n", shost, hba);
+
        c3cn = cxgb3i_c3cn_create();
        if (!c3cn) {
                cxgb3i_log_info("ep connect OOM.\n");
@@ -282,17 +290,27 @@ static struct iscsi_endpoint *cxgb3i_ep_connect(struct sockaddr *dst_addr,
                goto release_conn;
        }
 
-       err = cxgb3i_c3cn_connect(c3cn, (struct sockaddr_in *)dst_addr);
+       err = cxgb3i_c3cn_connect(hba ? hba->ndev : NULL, c3cn,
+                                (struct sockaddr_in *)dst_addr);
        if (err < 0) {
                cxgb3i_log_info("ep connect failed.\n");
                goto release_conn;
        }
+
        hba = cxgb3i_hba_find_by_netdev(c3cn->dst_cache->dev);
        if (!hba) {
                err = -ENOSPC;
                cxgb3i_log_info("NOT going through cxgbi device.\n");
                goto release_conn;
        }
+
+       if (shost && hba != iscsi_host_priv(shost)) {
+               err = -ENOSPC;
+               cxgb3i_log_info("Could not connect through request host%u\n",
+                               shost->host_no);
+               goto release_conn;
+       }
+
        if (c3cn_is_closing(c3cn)) {
                err = -ENOSPC;
                cxgb3i_log_info("ep connect unable to connect.\n");
index e11c9c180f39d322127f37b9aae88a5409b04863..c1d5be4adf9c6cdfe7fa530c93821f22f091ba97 100644 (file)
@@ -1479,12 +1479,13 @@ static struct net_device *cxgb3_egress_dev(struct net_device *root_dev,
        return NULL;
 }
 
-static struct rtable *find_route(__be32 saddr, __be32 daddr,
+static struct rtable *find_route(struct net_device *dev,
+                                __be32 saddr, __be32 daddr,
                                 __be16 sport, __be16 dport)
 {
        struct rtable *rt;
        struct flowi fl = {
-               .oif = 0,
+               .oif = dev ? dev->ifindex : 0,
                .nl_u = {
                         .ip4_u = {
                                   .daddr = daddr,
@@ -1573,36 +1574,40 @@ out_err:
  *
  * return 0 if active open request is sent, < 0 otherwise.
  */
-int cxgb3i_c3cn_connect(struct s3_conn *c3cn, struct sockaddr_in *usin)
+int cxgb3i_c3cn_connect(struct net_device *dev, struct s3_conn *c3cn,
+                       struct sockaddr_in *usin)
 {
        struct rtable *rt;
-       struct net_device *dev;
        struct cxgb3i_sdev_data *cdata;
        struct t3cdev *cdev;
        __be32 sipv4;
        int err;
 
+       c3cn_conn_debug("c3cn 0x%p, dev 0x%p.\n", c3cn, dev);
+
        if (usin->sin_family != AF_INET)
                return -EAFNOSUPPORT;
 
        c3cn->daddr.sin_port = usin->sin_port;
        c3cn->daddr.sin_addr.s_addr = usin->sin_addr.s_addr;
 
-       rt = find_route(c3cn->saddr.sin_addr.s_addr,
+       rt = find_route(dev, c3cn->saddr.sin_addr.s_addr,
                        c3cn->daddr.sin_addr.s_addr,
                        c3cn->saddr.sin_port,
                        c3cn->daddr.sin_port);
        if (rt == NULL) {
-               c3cn_conn_debug("NO route to 0x%x, port %u.\n",
+               c3cn_conn_debug("NO route to 0x%x, port %u, dev %s.\n",
                                c3cn->daddr.sin_addr.s_addr,
-                               ntohs(c3cn->daddr.sin_port));
+                               ntohs(c3cn->daddr.sin_port),
+                               dev ? dev->name : "any");
                return -ENETUNREACH;
        }
 
        if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
-               c3cn_conn_debug("multi-cast route to 0x%x, port %u.\n",
+               c3cn_conn_debug("multi-cast route to 0x%x, port %u, dev %s.\n",
                                c3cn->daddr.sin_addr.s_addr,
-                               ntohs(c3cn->daddr.sin_port));
+                               ntohs(c3cn->daddr.sin_port),
+                               dev ? dev->name : "any");
                ip_rt_put(rt);
                return -ENETUNREACH;
        }
index ebfca960c0a9d8358236c8658c084e44bda4ed3d..6a1d86b1fafea325c9bcbc415419732273b8568c 100644 (file)
@@ -169,7 +169,8 @@ void cxgb3i_sdev_add(struct t3cdev *, struct cxgb3_client *);
 void cxgb3i_sdev_remove(struct t3cdev *);
 
 struct s3_conn *cxgb3i_c3cn_create(void);
-int cxgb3i_c3cn_connect(struct s3_conn *, struct sockaddr_in *);
+int cxgb3i_c3cn_connect(struct net_device *, struct s3_conn *,
+                       struct sockaddr_in *);
 void cxgb3i_c3cn_rx_credits(struct s3_conn *, int);
 int cxgb3i_c3cn_send_pdus(struct s3_conn *, struct sk_buff *);
 void cxgb3i_c3cn_release(struct s3_conn *);
index 43b8c51e98d090d5cdf1f333fc449730e6002d1e..fd0544f7da81700d2e586387b0f20ae1b0cd64d2 100644 (file)
@@ -561,6 +561,12 @@ static int rdac_check_sense(struct scsi_device *sdev,
        struct rdac_dh_data *h = get_rdac_data(sdev);
        switch (sense_hdr->sense_key) {
        case NOT_READY:
+               if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
+                       /* LUN Not Ready - Logical Unit Not Ready and is in
+                        * the process of becoming ready
+                        * Just retry.
+                        */
+                       return ADD_TO_MLQUEUE;
                if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x81)
                        /* LUN Not Ready - Storage firmware incompatible
                 * Manual code synchronisation required.
index 03e1926f40b523c231033e809031ad9b1d138ec7..e606b4829d4430684a973c743d721bf81ba66d1f 100644 (file)
@@ -54,7 +54,6 @@ MODULE_LICENSE("GPL v2");
 /* fcoe host list */
 LIST_HEAD(fcoe_hostlist);
 DEFINE_RWLOCK(fcoe_hostlist_lock);
-DEFINE_TIMER(fcoe_timer, NULL, 0, 0);
 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
@@ -71,7 +70,7 @@ static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
 static int fcoe_hostlist_remove(const struct fc_lport *);
 
-static int fcoe_check_wait_queue(struct fc_lport *);
+static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
 static void fcoe_dev_cleanup(void);
@@ -146,6 +145,7 @@ static int fcoe_lport_config(struct fc_lport *lp)
        lp->link_up = 0;
        lp->qfull = 0;
        lp->max_retry_count = 3;
+       lp->max_rport_retry_count = 3;
        lp->e_d_tov = 2 * 1000; /* FC-FS default */
        lp->r_a_tov = 2 * 2 * 1000;
        lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -166,6 +166,18 @@ static int fcoe_lport_config(struct fc_lport *lp)
        return 0;
 }
 
+/**
+ * fcoe_queue_timer() - fcoe queue timer
+ * @lp: the fc_lport pointer
+ *
+ * Calls fcoe_check_wait_queue on timeout
+ *
+ */
+static void fcoe_queue_timer(ulong lp)
+{
+       fcoe_check_wait_queue((struct fc_lport *)lp, NULL);
+}
+
 /**
  * fcoe_netdev_config() - Set up netdev for SW FCoE
  * @lp : ptr to the fc_lport
@@ -236,6 +248,7 @@ static int fcoe_netdev_config(struct fc_lport *lp, struct net_device *netdev)
        }
        skb_queue_head_init(&fc->fcoe_pending_queue);
        fc->fcoe_pending_queue_active = 0;
+       setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
 
        /* setup Source Mac Address */
        memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
@@ -386,6 +399,9 @@ static int fcoe_if_destroy(struct net_device *netdev)
        /* Free existing skbs */
        fcoe_clean_pending_queue(lp);
 
+       /* Stop the timer */
+       del_timer_sync(&fc->timer);
+
        /* Free memory used by statistical counters */
        fc_lport_free_stats(lp);
 
@@ -988,7 +1004,7 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
  */
 int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
 {
-       int wlen, rc = 0;
+       int wlen;
        u32 crc;
        struct ethhdr *eh;
        struct fcoe_crc_eof *cp;
@@ -1021,8 +1037,7 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
        sof = fr_sof(fp);
        eof = fr_eof(fp);
 
-       elen = (fc->real_dev->priv_flags & IFF_802_1Q_VLAN) ?
-               sizeof(struct vlan_ethhdr) : sizeof(struct ethhdr);
+       elen = sizeof(struct ethhdr);
        hlen = sizeof(struct fcoe_hdr);
        tlen = sizeof(struct fcoe_crc_eof);
        wlen = (skb->len - tlen + sizeof(crc)) / FCOE_WORD_TO_BYTE;
@@ -1107,18 +1122,9 @@ int fcoe_xmit(struct fc_lport *lp, struct fc_frame *fp)
        /* send down to lld */
        fr_dev(fp) = lp;
        if (fc->fcoe_pending_queue.qlen)
-               rc = fcoe_check_wait_queue(lp);
-
-       if (rc == 0)
-               rc = fcoe_start_io(skb);
-
-       if (rc) {
-               spin_lock_bh(&fc->fcoe_pending_queue.lock);
-               __skb_queue_tail(&fc->fcoe_pending_queue, skb);
-               spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-               if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
-                       lp->qfull = 1;
-       }
+               fcoe_check_wait_queue(lp, skb);
+       else if (fcoe_start_io(skb))
+               fcoe_check_wait_queue(lp, skb);
 
        return 0;
 }
@@ -1267,32 +1273,6 @@ int fcoe_percpu_receive_thread(void *arg)
        return 0;
 }
 
-/**
- * fcoe_watchdog() - fcoe timer callback
- * @vp:
- *
- * This checks the pending queue length for fcoe and set lport qfull
- * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
- * fcoe_hostlist.
- *
- * Returns: 0 for success
- */
-void fcoe_watchdog(ulong vp)
-{
-       struct fcoe_softc *fc;
-
-       read_lock(&fcoe_hostlist_lock);
-       list_for_each_entry(fc, &fcoe_hostlist, list) {
-               if (fc->ctlr.lp)
-                       fcoe_check_wait_queue(fc->ctlr.lp);
-       }
-       read_unlock(&fcoe_hostlist_lock);
-
-       fcoe_timer.expires = jiffies + (1 * HZ);
-       add_timer(&fcoe_timer);
-}
-
-
 /**
  * fcoe_check_wait_queue() - attempt to clear the transmit backlog
  * @lp: the fc_lport
@@ -1305,16 +1285,17 @@ void fcoe_watchdog(ulong vp)
  * The wait_queue is used when the skb transmit fails. skb will go
  * in the wait_queue which will be emptied by the timer function or
  * by the next skb transmit.
- *
- * Returns: 0 for success
  */
-static int fcoe_check_wait_queue(struct fc_lport *lp)
+static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
 {
        struct fcoe_softc *fc = lport_priv(lp);
-       struct sk_buff *skb;
-       int rc = -1;
+       int rc;
 
        spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+       if (skb)
+               __skb_queue_tail(&fc->fcoe_pending_queue, skb);
+
        if (fc->fcoe_pending_queue_active)
                goto out;
        fc->fcoe_pending_queue_active = 1;
@@ -1340,23 +1321,26 @@ static int fcoe_check_wait_queue(struct fc_lport *lp)
 
        if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
                lp->qfull = 0;
+       if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
+               mod_timer(&fc->timer, jiffies + 2);
        fc->fcoe_pending_queue_active = 0;
-       rc = fc->fcoe_pending_queue.qlen;
 out:
+       if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+               lp->qfull = 1;
        spin_unlock_bh(&fc->fcoe_pending_queue.lock);
-       return rc;
+       return;
 }
 
 /**
  * fcoe_dev_setup() - setup link change notification interface
  */
-static void fcoe_dev_setup()
+static void fcoe_dev_setup(void)
 {
        register_netdevice_notifier(&fcoe_notifier);
 }
 
 /**
- * fcoe_dev_setup() - cleanup link change notification interface
+ * fcoe_dev_cleanup() - cleanup link change notification interface
  */
 static void fcoe_dev_cleanup(void)
 {
@@ -1815,10 +1799,6 @@ static int __init fcoe_init(void)
        /* Setup link change notification */
        fcoe_dev_setup();
 
-       setup_timer(&fcoe_timer, fcoe_watchdog, 0);
-
-       mod_timer(&fcoe_timer, jiffies + (10 * HZ));
-
        fcoe_if_init();
 
        return 0;
@@ -1844,9 +1824,6 @@ static void __exit fcoe_exit(void)
 
        fcoe_dev_cleanup();
 
-       /* Stop the timer */
-       del_timer_sync(&fcoe_timer);
-
        /* releases the associated fcoe hosts */
        list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
                fcoe_if_destroy(fc->real_dev);
index 917aae88689733feb40d49963928d5d6bf6863f9..a1eb8c1988b07584bed2a10280a6efbf768f8813 100644 (file)
@@ -61,6 +61,7 @@ struct fcoe_softc {
        struct packet_type  fip_packet_type;
        struct sk_buff_head fcoe_pending_queue;
        u8      fcoe_pending_queue_active;
+       struct timer_list timer;                /* queue timer */
        struct fcoe_ctlr ctlr;
 };
 
index 62ba0f39c6bd267f2e7a5fc30354792434569b29..929411880e4b16c010ca7ca3d7369fddec6abd88 100644 (file)
@@ -213,7 +213,7 @@ static void fcoe_ctlr_solicit(struct fcoe_ctlr *fip, struct fcoe_fcf *fcf)
        sol->desc.size.fd_size = htons(fcoe_size);
 
        skb_put(skb, sizeof(*sol));
-       skb->protocol = htons(ETH_P_802_3);
+       skb->protocol = htons(ETH_P_FIP);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -365,7 +365,7 @@ static void fcoe_ctlr_send_keep_alive(struct fcoe_ctlr *fip, int ports, u8 *sa)
        }
 
        skb_put(skb, len);
-       skb->protocol = htons(ETH_P_802_3);
+       skb->protocol = htons(ETH_P_FIP);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        fip->send(fip, skb);
@@ -424,7 +424,7 @@ static int fcoe_ctlr_encaps(struct fcoe_ctlr *fip,
        if (dtype != ELS_FLOGI)
                memcpy(mac->fd_mac, fip->data_src_addr, ETH_ALEN);
 
-       skb->protocol = htons(ETH_P_802_3);
+       skb->protocol = htons(ETH_P_FIP);
        skb_reset_mac_header(skb);
        skb_reset_network_header(skb);
        return 0;
@@ -447,14 +447,10 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
        u16 old_xid;
        u8 op;
 
-       if (fip->state == FIP_ST_NON_FIP)
-               return 0;
-
        fh = (struct fc_frame_header *)skb->data;
        op = *(u8 *)(fh + 1);
 
-       switch (op) {
-       case ELS_FLOGI:
+       if (op == ELS_FLOGI) {
                old_xid = fip->flogi_oxid;
                fip->flogi_oxid = ntohs(fh->fh_ox_id);
                if (fip->state == FIP_ST_AUTO) {
@@ -466,6 +462,15 @@ int fcoe_ctlr_els_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
                        fip->map_dest = 1;
                        return 0;
                }
+               if (fip->state == FIP_ST_NON_FIP)
+                       fip->map_dest = 1;
+       }
+
+       if (fip->state == FIP_ST_NON_FIP)
+               return 0;
+
+       switch (op) {
+       case ELS_FLOGI:
                op = FIP_DT_FLOGI;
                break;
        case ELS_FDISC:
index 32ef6b87d8953142aa8bdd24092f7f2d77540c00..a84072865fc2615a7a9fcce47c48b94fe05f4592 100644 (file)
@@ -680,6 +680,7 @@ static int __devinit fnic_probe(struct pci_dev *pdev,
        }
 
        lp->max_retry_count = fnic->config.flogi_retries;
+       lp->max_rport_retry_count = fnic->config.plogi_retries;
        lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
                              FCP_SPPF_CONF_COMPL);
        if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
index 59349a316e137facdd9b7f76b412ac14f23190ae..1258da34fbc251bf779f85d1a2fdd0a015e51a34 100644 (file)
@@ -152,6 +152,7 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
                          struct Scsi_Host *host, gdth_ha_str *ha)
 {
     int size = 0,len = 0;
+    int hlen;
     off_t begin = 0,pos = 0;
     int id, i, j, k, sec, flag;
     int no_mdrv = 0, drv_no, is_mirr;
@@ -192,11 +193,11 @@ static int gdth_get_info(char *buffer,char **start,off_t offset,int length,
     if (reserve_list[0] == 0xff)
         strcpy(hrec, "--");
     else {
-        sprintf(hrec, "%d", reserve_list[0]);
+        hlen = sprintf(hrec, "%d", reserve_list[0]);
         for (i = 1;  i < MAX_RES_ARGS; i++) {
             if (reserve_list[i] == 0xff) 
                 break;
-            sprintf(hrec,"%s,%d", hrec, reserve_list[i]);
+            hlen += snprintf(hrec + hlen, 161 - hlen, ",%d", reserve_list[i]);
         }
     }
     size = sprintf(buffer+len,
index ea4abee7a2a95e8d783da6b7f82da2bc7f39aab8..b4b805e8d7db7aaad258c75f9d8fd30088ff228c 100644 (file)
@@ -110,7 +110,7 @@ static const struct {
        { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_DEAD, DID_ERROR, 0, 1, "transport dead" },
        { IBMVFC_FABRIC_MAPPED, IBMVFC_CONFIG_ERROR, DID_ERROR, 1, 1, "configuration error" },
        { IBMVFC_FABRIC_MAPPED, IBMVFC_NAME_SERVER_FAIL, DID_ERROR, 1, 1, "name server failure" },
-       { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 0, 0, "link halted" },
+       { IBMVFC_FABRIC_MAPPED, IBMVFC_LINK_HALTED, DID_REQUEUE, 1, 0, "link halted" },
        { IBMVFC_FABRIC_MAPPED, IBMVFC_XPORT_GENERAL, DID_OK, 1, 0, "general transport error" },
 
        { IBMVFC_VIOS_FAILURE, IBMVFC_CRQ_FAILURE, DID_REQUEUE, 1, 1, "CRQ failure" },
@@ -143,6 +143,7 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *);
 static void ibmvfc_tgt_send_prli(struct ibmvfc_target *);
 static void ibmvfc_tgt_send_plogi(struct ibmvfc_target *);
 static void ibmvfc_tgt_query_target(struct ibmvfc_target *);
+static void ibmvfc_npiv_logout(struct ibmvfc_host *);
 
 static const char *unknown_error = "unknown error";
 
@@ -275,7 +276,7 @@ static int ibmvfc_get_err_result(struct ibmvfc_cmd *vfc_cmd)
        int fc_rsp_len = rsp->fcp_rsp_len;
 
        if ((rsp->flags & FCP_RSP_LEN_VALID) &&
-           ((!fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
+           ((fc_rsp_len && fc_rsp_len != 4 && fc_rsp_len != 8) ||
             rsp->data.info.rsp_code))
                return DID_ERROR << 16;
 
@@ -431,6 +432,8 @@ static void ibmvfc_set_tgt_action(struct ibmvfc_target *tgt,
        case IBMVFC_TGT_ACTION_DEL_RPORT:
                break;
        default:
+               if (action == IBMVFC_TGT_ACTION_DEL_RPORT)
+                       tgt->add_rport = 0;
                tgt->action = action;
                break;
        }
@@ -475,6 +478,10 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
                if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT)
                        vhost->action = action;
                break;
+       case IBMVFC_HOST_ACTION_LOGO_WAIT:
+               if (vhost->action == IBMVFC_HOST_ACTION_LOGO)
+                       vhost->action = action;
+               break;
        case IBMVFC_HOST_ACTION_INIT_WAIT:
                if (vhost->action == IBMVFC_HOST_ACTION_INIT)
                        vhost->action = action;
@@ -483,7 +490,7 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
                switch (vhost->action) {
                case IBMVFC_HOST_ACTION_INIT_WAIT:
                case IBMVFC_HOST_ACTION_NONE:
-               case IBMVFC_HOST_ACTION_TGT_ADD:
+               case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
                        vhost->action = action;
                        break;
                default:
@@ -494,11 +501,11 @@ static void ibmvfc_set_host_action(struct ibmvfc_host *vhost,
                if (vhost->action == IBMVFC_HOST_ACTION_ALLOC_TGTS)
                        vhost->action = action;
                break;
+       case IBMVFC_HOST_ACTION_LOGO:
        case IBMVFC_HOST_ACTION_INIT:
        case IBMVFC_HOST_ACTION_TGT_DEL:
        case IBMVFC_HOST_ACTION_QUERY_TGTS:
        case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
-       case IBMVFC_HOST_ACTION_TGT_ADD:
        case IBMVFC_HOST_ACTION_NONE:
        default:
                vhost->action = action;
@@ -576,7 +583,7 @@ static void ibmvfc_init_host(struct ibmvfc_host *vhost, int relogin)
                }
 
                list_for_each_entry(tgt, &vhost->targets, queue)
-                       tgt->need_login = 1;
+                       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                scsi_block_requests(vhost->host);
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
                vhost->job_step = ibmvfc_npiv_login;
@@ -646,6 +653,7 @@ static void ibmvfc_release_crq_queue(struct ibmvfc_host *vhost)
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
        vhost->state = IBMVFC_NO_CRQ;
+       vhost->logged_in = 0;
        dma_unmap_single(vhost->dev, crq->msg_token, PAGE_SIZE, DMA_BIDIRECTIONAL);
        free_page((unsigned long)crq->msgs);
 }
@@ -692,6 +700,7 @@ static int ibmvfc_reset_crq(struct ibmvfc_host *vhost)
        } while (rc == H_BUSY || H_IS_LONG_BUSY(rc));
 
        vhost->state = IBMVFC_NO_CRQ;
+       vhost->logged_in = 0;
        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
 
        /* Clean out the queue */
@@ -807,10 +816,10 @@ static void ibmvfc_purge_requests(struct ibmvfc_host *vhost, int error_code)
 }
 
 /**
- * __ibmvfc_reset_host - Reset the connection to the server (no locking)
+ * ibmvfc_hard_reset_host - Reset the connection to the server by breaking the CRQ
  * @vhost:     struct ibmvfc host to reset
  **/
-static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+static void ibmvfc_hard_reset_host(struct ibmvfc_host *vhost)
 {
        int rc;
 
@@ -826,9 +835,25 @@ static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
 }
 
 /**
- * ibmvfc_reset_host - Reset the connection to the server
+ * __ibmvfc_reset_host - Reset the connection to the server (no locking)
  * @vhost:     struct ibmvfc host to reset
  **/
+static void __ibmvfc_reset_host(struct ibmvfc_host *vhost)
+{
+       if (vhost->logged_in && vhost->action != IBMVFC_HOST_ACTION_LOGO_WAIT &&
+           !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
+               scsi_block_requests(vhost->host);
+               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO);
+               vhost->job_step = ibmvfc_npiv_logout;
+               wake_up(&vhost->work_wait_q);
+       } else
+               ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_reset_host - Reset the connection to the server
+ * @vhost:     ibmvfc host struct
+ **/
 static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
 {
        unsigned long flags;
@@ -842,9 +867,13 @@ static void ibmvfc_reset_host(struct ibmvfc_host *vhost)
  * ibmvfc_retry_host_init - Retry host initialization if allowed
  * @vhost:     ibmvfc host struct
  *
+ * Returns: 1 if init will be retried / 0 if not
+ *
  **/
-static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
+static int ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
 {
+       int retry = 0;
+
        if (vhost->action == IBMVFC_HOST_ACTION_INIT_WAIT) {
                vhost->delay_init = 1;
                if (++vhost->init_retries > IBMVFC_MAX_HOST_INIT_RETRIES) {
@@ -853,11 +882,14 @@ static void ibmvfc_retry_host_init(struct ibmvfc_host *vhost)
                        ibmvfc_link_down(vhost, IBMVFC_HOST_OFFLINE);
                } else if (vhost->init_retries == IBMVFC_MAX_HOST_INIT_RETRIES)
                        __ibmvfc_reset_host(vhost);
-               else
+               else {
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
+                       retry = 1;
+               }
        }
 
        wake_up(&vhost->work_wait_q);
+       return retry;
 }
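
The new return value feeds a logging tweak used throughout the rest of this patch: callers write level += ibmvfc_retry_host_init(vhost), so a failure that will be retried is logged one level quieter than a terminal one and stays hidden at the default verbosity. A standalone sketch of the idea, with illustrative names:

#include <stdio.h>

#define DEFAULT_LOG_LEVEL 2

static int host_log_level = DEFAULT_LOG_LEVEL;

/* Mirrors the driver's macro pattern: print only if verbosity
 * reaches the message's level. */
static void host_log(int level, const char *msg)
{
	if (host_log_level >= level)
		printf("%s\n", msg);
}

/* Returns 1 if the operation was rescheduled, 0 if it gave up. */
static int retry_init(int *retries, int max_retries)
{
	return ++(*retries) <= max_retries;
}

int main(void)
{
	int retries = 0, attempt;

	for (attempt = 0; attempt < 4; attempt++) {
		int level = DEFAULT_LOG_LEVEL;

		level += retry_init(&retries, 2);
		host_log(level, level > DEFAULT_LOG_LEVEL ?
			 "init failed, retrying (suppressed by default)" :
			 "init failed for good");
	}
	return 0;
}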
 
 /**
@@ -1137,8 +1169,9 @@ static void ibmvfc_set_login_info(struct ibmvfc_host *vhost)
        login_info->partition_num = vhost->partition_number;
        login_info->vfc_frame_version = 1;
        login_info->fcp_version = 3;
+       login_info->flags = IBMVFC_FLUSH_ON_HALT;
        if (vhost->client_migrated)
-               login_info->flags = IBMVFC_CLIENT_MIGRATED;
+               login_info->flags |= IBMVFC_CLIENT_MIGRATED;
 
        login_info->max_cmds = max_requests + IBMVFC_NUM_INTERNAL_REQ;
        login_info->capabilities = IBMVFC_CAN_MIGRATE;
@@ -1451,6 +1484,27 @@ static void ibmvfc_log_error(struct ibmvfc_event *evt)
                    rsp->flags, rsp_code, scsi_get_resid(cmnd), rsp->scsi_status);
 }
 
+/**
+ * ibmvfc_relogin - Log back into the specified device
+ * @sdev:      scsi device struct
+ *
+ **/
+static void ibmvfc_relogin(struct scsi_device *sdev)
+{
+       struct ibmvfc_host *vhost = shost_priv(sdev->host);
+       struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
+       struct ibmvfc_target *tgt;
+
+       list_for_each_entry(tgt, &vhost->targets, queue) {
+               if (rport == tgt->rport) {
+                       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+                       break;
+               }
+       }
+
+       ibmvfc_reinit_host(vhost);
+}
+
 /**
  * ibmvfc_scsi_done - Handle responses from commands
  * @evt:       ibmvfc event to be handled
@@ -1483,7 +1537,7 @@ static void ibmvfc_scsi_done(struct ibmvfc_event *evt)
                        if ((rsp->flags & FCP_SNS_LEN_VALID) && rsp->fcp_sense_len && rsp_len <= 8)
                                memcpy(cmnd->sense_buffer, rsp->data.sense + rsp_len, sense_len);
                        if ((vfc_cmd->status & IBMVFC_VIOS_FAILURE) && (vfc_cmd->error == IBMVFC_PLOGI_REQUIRED))
-                               ibmvfc_reinit_host(evt->vhost);
+                               ibmvfc_relogin(cmnd->device);
 
                        if (!cmnd->result && (!scsi_get_resid(cmnd) || (rsp->flags & FCP_RESID_OVER)))
                                cmnd->result = (DID_ERROR << 16);
@@ -2148,13 +2202,31 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
                                struct ibmvfc_host *vhost)
 {
        const char *desc = ibmvfc_get_ae_desc(crq->event);
+       struct ibmvfc_target *tgt;
 
        ibmvfc_log(vhost, 3, "%s event received. scsi_id: %llx, wwpn: %llx,"
                   " node_name: %llx\n", desc, crq->scsi_id, crq->wwpn, crq->node_name);
 
        switch (crq->event) {
-       case IBMVFC_AE_LINK_UP:
        case IBMVFC_AE_RESUME:
+               switch (crq->link_state) {
+               case IBMVFC_AE_LS_LINK_DOWN:
+                       ibmvfc_link_down(vhost, IBMVFC_LINK_DOWN);
+                       break;
+               case IBMVFC_AE_LS_LINK_DEAD:
+                       ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+                       break;
+               case IBMVFC_AE_LS_LINK_UP:
+               case IBMVFC_AE_LS_LINK_BOUNCED:
+               default:
+                       vhost->events_to_log |= IBMVFC_AE_LINKUP;
+                       vhost->delay_init = 1;
+                       __ibmvfc_reset_host(vhost);
+                       break;
+               };
+
+               break;
+       case IBMVFC_AE_LINK_UP:
                vhost->events_to_log |= IBMVFC_AE_LINKUP;
                vhost->delay_init = 1;
                __ibmvfc_reset_host(vhost);
@@ -2168,9 +2240,23 @@ static void ibmvfc_handle_async(struct ibmvfc_async_crq *crq,
        case IBMVFC_AE_SCN_NPORT:
        case IBMVFC_AE_SCN_GROUP:
                vhost->events_to_log |= IBMVFC_AE_RSCN;
+               ibmvfc_reinit_host(vhost);
+               break;
        case IBMVFC_AE_ELS_LOGO:
        case IBMVFC_AE_ELS_PRLO:
        case IBMVFC_AE_ELS_PLOGI:
+               list_for_each_entry(tgt, &vhost->targets, queue) {
+                       if (!crq->scsi_id && !crq->wwpn && !crq->node_name)
+                               break;
+                       if (crq->scsi_id && tgt->scsi_id != crq->scsi_id)
+                               continue;
+                       if (crq->wwpn && tgt->ids.port_name != crq->wwpn)
+                               continue;
+                       if (crq->node_name && tgt->ids.node_name != crq->node_name)
+                               continue;
+                       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+               }
+
                ibmvfc_reinit_host(vhost);
                break;
        case IBMVFC_AE_LINK_DOWN:
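
Rather than reinitializing blindly, the ELS cases now mark for deletion only the rports the event actually names: a zero scsi_id, wwpn, or node_name acts as a wildcard, while an event carrying no identifiers at all matches no specific target. A minimal sketch of that matching rule (hypothetical helper, simplified types):

#include <stdio.h>
#include <stdint.h>

struct tgt {
	uint64_t scsi_id;
	uint64_t wwpn;
	uint64_t node_name;
};

/* Nonzero event fields must match; zero fields act as wildcards.
 * An event carrying no identifiers at all names no specific target. */
static int event_matches(const struct tgt *t, uint64_t scsi_id,
			 uint64_t wwpn, uint64_t node_name)
{
	if (!scsi_id && !wwpn && !node_name)
		return 0;
	if (scsi_id && t->scsi_id != scsi_id)
		return 0;
	if (wwpn && t->wwpn != wwpn)
		return 0;
	if (node_name && t->node_name != node_name)
		return 0;
	return 1;
}

int main(void)
{
	struct tgt t = { 0x10a, 0x5005076801234567ULL, 0x5005076801234500ULL };

	printf("%d\n", event_matches(&t, 0x10a, 0, 0));	/* 1: scsi_id matches */
	printf("%d\n", event_matches(&t, 0, 0, 0));	/* 0: no identifiers */
	return 0;
}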
@@ -2222,6 +2308,7 @@ static void ibmvfc_handle_crq(struct ibmvfc_crq *crq, struct ibmvfc_host *vhost)
                return;
        case IBMVFC_CRQ_XPORT_EVENT:
                vhost->state = IBMVFC_NO_CRQ;
+               vhost->logged_in = 0;
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
                if (crq->format == IBMVFC_PARTITION_MIGRATED) {
                        /* We need to re-setup the interpartition connection */
@@ -2299,7 +2386,7 @@ static int ibmvfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
                done = 1;
        }
 
-       if (vhost->state != IBMVFC_NO_CRQ && vhost->action == IBMVFC_HOST_ACTION_NONE)
+       if (vhost->scan_complete)
                done = 1;
        spin_unlock_irqrestore(shost->host_lock, flags);
        return done;
@@ -2434,14 +2521,6 @@ static ssize_t ibmvfc_show_host_partition_name(struct device *dev,
                        vhost->login_buf->resp.partition_name);
 }
 
-static struct device_attribute ibmvfc_host_partition_name = {
-       .attr = {
-               .name = "partition_name",
-               .mode = S_IRUGO,
-       },
-       .show = ibmvfc_show_host_partition_name,
-};
-
 static ssize_t ibmvfc_show_host_device_name(struct device *dev,
                                            struct device_attribute *attr, char *buf)
 {
@@ -2452,14 +2531,6 @@ static ssize_t ibmvfc_show_host_device_name(struct device *dev,
                        vhost->login_buf->resp.device_name);
 }
 
-static struct device_attribute ibmvfc_host_device_name = {
-       .attr = {
-               .name = "device_name",
-               .mode = S_IRUGO,
-       },
-       .show = ibmvfc_show_host_device_name,
-};
-
 static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
                                         struct device_attribute *attr, char *buf)
 {
@@ -2470,14 +2541,6 @@ static ssize_t ibmvfc_show_host_loc_code(struct device *dev,
                        vhost->login_buf->resp.port_loc_code);
 }
 
-static struct device_attribute ibmvfc_host_loc_code = {
-       .attr = {
-               .name = "port_loc_code",
-               .mode = S_IRUGO,
-       },
-       .show = ibmvfc_show_host_loc_code,
-};
-
 static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
                                         struct device_attribute *attr, char *buf)
 {
@@ -2488,14 +2551,6 @@ static ssize_t ibmvfc_show_host_drc_name(struct device *dev,
                        vhost->login_buf->resp.drc_name);
 }
 
-static struct device_attribute ibmvfc_host_drc_name = {
-       .attr = {
-               .name = "drc_name",
-               .mode = S_IRUGO,
-       },
-       .show = ibmvfc_show_host_drc_name,
-};
-
 static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
                                             struct device_attribute *attr, char *buf)
 {
@@ -2504,13 +2559,13 @@ static ssize_t ibmvfc_show_host_npiv_version(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", vhost->login_buf->resp.version);
 }
 
-static struct device_attribute ibmvfc_host_npiv_version = {
-       .attr = {
-               .name = "npiv_version",
-               .mode = S_IRUGO,
-       },
-       .show = ibmvfc_show_host_npiv_version,
-};
+static ssize_t ibmvfc_show_host_capabilities(struct device *dev,
+                                            struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvfc_host *vhost = shost_priv(shost);
+       return snprintf(buf, PAGE_SIZE, "%llx\n", vhost->login_buf->resp.capabilities);
+}
 
 /**
  * ibmvfc_show_log_level - Show the adapter's error logging level
@@ -2556,14 +2611,14 @@ static ssize_t ibmvfc_store_log_level(struct device *dev,
        return strlen(buf);
 }
 
-static struct device_attribute ibmvfc_log_level_attr = {
-       .attr = {
-               .name =         "log_level",
-               .mode =         S_IRUGO | S_IWUSR,
-       },
-       .show = ibmvfc_show_log_level,
-       .store = ibmvfc_store_log_level
-};
+static DEVICE_ATTR(partition_name, S_IRUGO, ibmvfc_show_host_partition_name, NULL);
+static DEVICE_ATTR(device_name, S_IRUGO, ibmvfc_show_host_device_name, NULL);
+static DEVICE_ATTR(port_loc_code, S_IRUGO, ibmvfc_show_host_loc_code, NULL);
+static DEVICE_ATTR(drc_name, S_IRUGO, ibmvfc_show_host_drc_name, NULL);
+static DEVICE_ATTR(npiv_version, S_IRUGO, ibmvfc_show_host_npiv_version, NULL);
+static DEVICE_ATTR(capabilities, S_IRUGO, ibmvfc_show_host_capabilities, NULL);
+static DEVICE_ATTR(log_level, S_IRUGO | S_IWUSR,
+                  ibmvfc_show_log_level, ibmvfc_store_log_level);
 
 #ifdef CONFIG_SCSI_IBMVFC_TRACE
 /**
@@ -2612,12 +2667,13 @@ static struct bin_attribute ibmvfc_trace_attr = {
 #endif
 
 static struct device_attribute *ibmvfc_attrs[] = {
-       &ibmvfc_host_partition_name,
-       &ibmvfc_host_device_name,
-       &ibmvfc_host_loc_code,
-       &ibmvfc_host_drc_name,
-       &ibmvfc_host_npiv_version,
-       &ibmvfc_log_level_attr,
+       &dev_attr_partition_name,
+       &dev_attr_device_name,
+       &dev_attr_port_loc_code,
+       &dev_attr_drc_name,
+       &dev_attr_npiv_version,
+       &dev_attr_capabilities,
+       &dev_attr_log_level,
        NULL
 };
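
These sysfs hunks swap the hand-rolled struct device_attribute initializers for the kernel's DEVICE_ATTR() macro, which stamps out a variable named dev_attr_<name>, hence the new &dev_attr_* entries in the table above. A toy userspace rendition of the same trick (types pared down; the real macro also ties in the show/store prototypes):

#include <stdio.h>

struct device_attribute {
	const char *name;
	unsigned int mode;
	const char *(*show)(void);
};

#define S_IRUGO 0444
#define DEVICE_ATTR(_name, _mode, _show) \
	struct device_attribute dev_attr_##_name = { #_name, _mode, _show }

static const char *show_npiv_version(void)
{
	return "2\n";
}

static DEVICE_ATTR(npiv_version, S_IRUGO, show_npiv_version);

int main(void)
{
	printf("%s (mode %o): %s", dev_attr_npiv_version.name,
	       dev_attr_npiv_version.mode, dev_attr_npiv_version.show());
	return 0;
}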
 
@@ -2774,15 +2830,19 @@ static void ibmvfc_init_tgt(struct ibmvfc_target *tgt,
  * @tgt:               ibmvfc target struct
  * @job_step:  initialization job step
  *
+ * Returns: 1 if step will be retried / 0 if not
+ *
  **/
-static void ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
+static int ibmvfc_retry_tgt_init(struct ibmvfc_target *tgt,
                                  void (*job_step) (struct ibmvfc_target *))
 {
        if (++tgt->init_retries > IBMVFC_MAX_TGT_INIT_RETRIES) {
                ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                wake_up(&tgt->vhost->work_wait_q);
+               return 0;
        } else
                ibmvfc_init_tgt(tgt, job_step);
+       return 1;
 }
 
 /* Defined in FC-LS */
@@ -2831,7 +2891,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
        struct ibmvfc_process_login *rsp = &evt->xfer_iu->prli;
        struct ibmvfc_prli_svc_parms *parms = &rsp->parms;
        u32 status = rsp->common.status;
-       int index;
+       int index, level = IBMVFC_DEFAULT_LOG_LEVEL;
 
        vhost->discovery_threads--;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2850,7 +2910,7 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
                                                tgt->ids.roles |= FC_PORT_ROLE_FCP_TARGET;
                                        if (parms->service_parms & IBMVFC_PRLI_INITIATOR_FUNC)
                                                tgt->ids.roles |= FC_PORT_ROLE_FCP_INITIATOR;
-                                       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_ADD_RPORT);
+                                       tgt->add_rport = 1;
                                } else
                                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                        } else if (prli_rsp[index].retry)
@@ -2867,13 +2927,14 @@ static void ibmvfc_tgt_prli_done(struct ibmvfc_event *evt)
                break;
        case IBMVFC_MAD_FAILED:
        default:
-               tgt_err(tgt, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(rsp->status, rsp->error),
-                       rsp->status, rsp->error, status);
                if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-                       ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
+                       level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_prli);
                else
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+               tgt_log(tgt, level, "Process Login failed: %s (%x:%x) rc=0x%02X\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error),
+                       rsp->status, rsp->error, status);
                break;
        };
 
@@ -2932,6 +2993,7 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
        struct ibmvfc_host *vhost = evt->vhost;
        struct ibmvfc_port_login *rsp = &evt->xfer_iu->plogi;
        u32 status = rsp->common.status;
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
        vhost->discovery_threads--;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -2960,15 +3022,15 @@ static void ibmvfc_tgt_plogi_done(struct ibmvfc_event *evt)
                break;
        case IBMVFC_MAD_FAILED:
        default:
-               tgt_err(tgt, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
-                       ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
-                       ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
-
                if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-                       ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
+                       level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_send_plogi);
                else
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+               tgt_log(tgt, level, "Port Login failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+                       ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+                       ibmvfc_get_ls_explain(rsp->fc_explain), rsp->fc_explain, status);
                break;
        };
 
@@ -3129,13 +3191,13 @@ static void ibmvfc_tgt_adisc_done(struct ibmvfc_event *evt)
        case IBMVFC_MAD_SUCCESS:
                tgt_dbg(tgt, "ADISC succeeded\n");
                if (ibmvfc_adisc_needs_plogi(mad, tgt))
-                       tgt->need_login = 1;
+                       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                break;
        case IBMVFC_MAD_DRIVER_FAILED:
                break;
        case IBMVFC_MAD_FAILED:
        default:
-               tgt->need_login = 1;
+               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                fc_reason = (mad->fc_iu.response[1] & 0x00ff0000) >> 16;
                fc_explain = (mad->fc_iu.response[1] & 0x0000ff00) >> 8;
                tgt_info(tgt, "ADISC failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
@@ -3322,6 +3384,7 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
        struct ibmvfc_host *vhost = evt->vhost;
        struct ibmvfc_query_tgt *rsp = &evt->xfer_iu->query_tgt;
        u32 status = rsp->common.status;
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
        vhost->discovery_threads--;
        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
@@ -3341,19 +3404,19 @@ static void ibmvfc_tgt_query_target_done(struct ibmvfc_event *evt)
                break;
        case IBMVFC_MAD_FAILED:
        default:
-               tgt_err(tgt, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
-                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
-                       ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
-                       ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
-
                if ((rsp->status & IBMVFC_FABRIC_MAPPED) == IBMVFC_FABRIC_MAPPED &&
                    rsp->error == IBMVFC_UNABLE_TO_PERFORM_REQ &&
                    rsp->fc_explain == IBMVFC_PORT_NAME_NOT_REG)
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
                else if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-                       ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
+                       level += ibmvfc_retry_tgt_init(tgt, ibmvfc_tgt_query_target);
                else
                        ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_DEL_RPORT);
+
+               tgt_log(tgt, level, "Query Target failed: %s (%x:%x) %s (%x) %s (%x) rc=0x%02X\n",
+                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error,
+                       ibmvfc_get_fc_type(rsp->fc_type), rsp->fc_type,
+                       ibmvfc_get_gs_explain(rsp->fc_explain), rsp->fc_explain, status);
                break;
        };
 
@@ -3420,7 +3483,7 @@ static int ibmvfc_alloc_target(struct ibmvfc_host *vhost, u64 scsi_id)
        }
        spin_unlock_irqrestore(vhost->host->host_lock, flags);
 
-       tgt = mempool_alloc(vhost->tgt_pool, GFP_KERNEL);
+       tgt = mempool_alloc(vhost->tgt_pool, GFP_NOIO);
        if (!tgt) {
                dev_err(vhost->dev, "Target allocation failure for scsi id %08llx\n",
                        scsi_id);
@@ -3472,6 +3535,7 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
        struct ibmvfc_host *vhost = evt->vhost;
        struct ibmvfc_discover_targets *rsp = &evt->xfer_iu->discover_targets;
        u32 mad_status = rsp->common.status;
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
        switch (mad_status) {
        case IBMVFC_MAD_SUCCESS:
@@ -3480,9 +3544,9 @@ static void ibmvfc_discover_targets_done(struct ibmvfc_event *evt)
                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_ALLOC_TGTS);
                break;
        case IBMVFC_MAD_FAILED:
-               dev_err(vhost->dev, "Discover Targets failed: %s (%x:%x)\n",
-                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
-               ibmvfc_retry_host_init(vhost);
+               level += ibmvfc_retry_host_init(vhost);
+               ibmvfc_log(vhost, level, "Discover Targets failed: %s (%x:%x)\n",
+                          ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
                break;
        case IBMVFC_MAD_DRIVER_FAILED:
                break;
@@ -3534,18 +3598,19 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
        u32 mad_status = evt->xfer_iu->npiv_login.common.status;
        struct ibmvfc_npiv_login_resp *rsp = &vhost->login_buf->resp;
        unsigned int npiv_max_sectors;
+       int level = IBMVFC_DEFAULT_LOG_LEVEL;
 
        switch (mad_status) {
        case IBMVFC_MAD_SUCCESS:
                ibmvfc_free_event(evt);
                break;
        case IBMVFC_MAD_FAILED:
-               dev_err(vhost->dev, "NPIV Login failed: %s (%x:%x)\n",
-                       ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
                if (ibmvfc_retry_cmd(rsp->status, rsp->error))
-                       ibmvfc_retry_host_init(vhost);
+                       level += ibmvfc_retry_host_init(vhost);
                else
                        ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+               ibmvfc_log(vhost, level, "NPIV Login failed: %s (%x:%x)\n",
+                          ibmvfc_get_cmd_error(rsp->status, rsp->error), rsp->status, rsp->error);
                ibmvfc_free_event(evt);
                return;
        case IBMVFC_MAD_CRQ_ERROR:
@@ -3578,6 +3643,7 @@ static void ibmvfc_npiv_login_done(struct ibmvfc_event *evt)
                return;
        }
 
+       vhost->logged_in = 1;
        npiv_max_sectors = min((uint)(rsp->max_dma_len >> 9), IBMVFC_MAX_SECTORS);
        dev_info(vhost->dev, "Host partition: %s, device: %s %s %s max sectors %u\n",
                 rsp->partition_name, rsp->device_name, rsp->port_loc_code,
@@ -3635,6 +3701,65 @@ static void ibmvfc_npiv_login(struct ibmvfc_host *vhost)
                ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
 };
 
+/**
+ * ibmvfc_npiv_logout_done - Completion handler for NPIV Logout
+ * @vhost:             ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout_done(struct ibmvfc_event *evt)
+{
+       struct ibmvfc_host *vhost = evt->vhost;
+       u32 mad_status = evt->xfer_iu->npiv_logout.common.status;
+
+       ibmvfc_free_event(evt);
+
+       switch (mad_status) {
+       case IBMVFC_MAD_SUCCESS:
+               if (list_empty(&vhost->sent) &&
+                   vhost->action == IBMVFC_HOST_ACTION_LOGO_WAIT) {
+                       ibmvfc_init_host(vhost, 0);
+                       return;
+               }
+               break;
+       case IBMVFC_MAD_FAILED:
+       case IBMVFC_MAD_NOT_SUPPORTED:
+       case IBMVFC_MAD_CRQ_ERROR:
+       case IBMVFC_MAD_DRIVER_FAILED:
+       default:
+               ibmvfc_dbg(vhost, "NPIV Logout failed. 0x%X\n", mad_status);
+               break;
+       }
+
+       ibmvfc_hard_reset_host(vhost);
+}
+
+/**
+ * ibmvfc_npiv_logout - Issue an NPIV Logout
+ * @vhost:             ibmvfc host struct
+ *
+ **/
+static void ibmvfc_npiv_logout(struct ibmvfc_host *vhost)
+{
+       struct ibmvfc_npiv_logout_mad *mad;
+       struct ibmvfc_event *evt;
+
+       evt = ibmvfc_get_event(vhost);
+       ibmvfc_init_event(evt, ibmvfc_npiv_logout_done, IBMVFC_MAD_FORMAT);
+
+       mad = &evt->iu.npiv_logout;
+       memset(mad, 0, sizeof(*mad));
+       mad->common.version = 1;
+       mad->common.opcode = IBMVFC_NPIV_LOGOUT;
+       mad->common.length = sizeof(struct ibmvfc_npiv_logout_mad);
+
+       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_LOGO_WAIT);
+
+       if (!ibmvfc_send_event(evt, vhost, default_timeout))
+               ibmvfc_dbg(vhost, "Sent NPIV logout\n");
+       else
+               ibmvfc_link_down(vhost, IBMVFC_LINK_DEAD);
+}
+
 /**
  * ibmvfc_dev_init_to_do - Is there target initialization work to do?
  * @vhost:             ibmvfc host struct
@@ -3671,6 +3796,7 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
        switch (vhost->action) {
        case IBMVFC_HOST_ACTION_NONE:
        case IBMVFC_HOST_ACTION_INIT_WAIT:
+       case IBMVFC_HOST_ACTION_LOGO_WAIT:
                return 0;
        case IBMVFC_HOST_ACTION_TGT_INIT:
        case IBMVFC_HOST_ACTION_QUERY_TGTS:
@@ -3683,9 +3809,9 @@ static int __ibmvfc_work_to_do(struct ibmvfc_host *vhost)
                        if (tgt->action == IBMVFC_TGT_ACTION_INIT_WAIT)
                                return 0;
                return 1;
+       case IBMVFC_HOST_ACTION_LOGO:
        case IBMVFC_HOST_ACTION_INIT:
        case IBMVFC_HOST_ACTION_ALLOC_TGTS:
-       case IBMVFC_HOST_ACTION_TGT_ADD:
        case IBMVFC_HOST_ACTION_TGT_DEL:
        case IBMVFC_HOST_ACTION_TGT_DEL_FAILED:
        case IBMVFC_HOST_ACTION_QUERY:
@@ -3740,25 +3866,26 @@ static void ibmvfc_log_ae(struct ibmvfc_host *vhost, int events)
 static void ibmvfc_tgt_add_rport(struct ibmvfc_target *tgt)
 {
        struct ibmvfc_host *vhost = tgt->vhost;
-       struct fc_rport *rport = tgt->rport;
+       struct fc_rport *rport;
        unsigned long flags;
 
-       if (rport) {
-               tgt_dbg(tgt, "Setting rport roles\n");
-               fc_remote_port_rolechg(rport, tgt->ids.roles);
-               spin_lock_irqsave(vhost->host->host_lock, flags);
-               ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
+       tgt_dbg(tgt, "Adding rport\n");
+       rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+
+       if (rport && tgt->action == IBMVFC_TGT_ACTION_DEL_RPORT) {
+               tgt_dbg(tgt, "Deleting rport\n");
+               list_del(&tgt->queue);
                spin_unlock_irqrestore(vhost->host->host_lock, flags);
+               fc_remote_port_delete(rport);
+               del_timer_sync(&tgt->timer);
+               kref_put(&tgt->kref, ibmvfc_release_tgt);
                return;
        }
 
-       tgt_dbg(tgt, "Adding rport\n");
-       rport = fc_remote_port_add(vhost->host, 0, &tgt->ids);
-       spin_lock_irqsave(vhost->host->host_lock, flags);
-       tgt->rport = rport;
-       ibmvfc_set_tgt_action(tgt, IBMVFC_TGT_ACTION_NONE);
        if (rport) {
                tgt_dbg(tgt, "rport add succeeded\n");
+               tgt->rport = rport;
                rport->maxframe_size = tgt->service_parms.common.bb_rcv_sz & 0x0fff;
                rport->supported_classes = 0;
                tgt->target_id = rport->scsi_target_id;
@@ -3789,8 +3916,12 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
        vhost->events_to_log = 0;
        switch (vhost->action) {
        case IBMVFC_HOST_ACTION_NONE:
+       case IBMVFC_HOST_ACTION_LOGO_WAIT:
        case IBMVFC_HOST_ACTION_INIT_WAIT:
                break;
+       case IBMVFC_HOST_ACTION_LOGO:
+               vhost->job_step(vhost);
+               break;
        case IBMVFC_HOST_ACTION_INIT:
                BUG_ON(vhost->state != IBMVFC_INITIALIZING);
                if (vhost->delay_init) {
@@ -3836,11 +3967,21 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
 
                if (vhost->state == IBMVFC_INITIALIZING) {
                        if (vhost->action == IBMVFC_HOST_ACTION_TGT_DEL_FAILED) {
-                               ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
-                               ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_ADD);
-                               vhost->init_retries = 0;
-                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
-                               scsi_unblock_requests(vhost->host);
+                               if (vhost->reinit) {
+                                       vhost->reinit = 0;
+                                       scsi_block_requests(vhost->host);
+                                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
+                                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                               } else {
+                                       ibmvfc_set_host_state(vhost, IBMVFC_ACTIVE);
+                                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
+                                       wake_up(&vhost->init_wait_q);
+                                       schedule_work(&vhost->rport_add_work_q);
+                                       vhost->init_retries = 0;
+                                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                                       scsi_unblock_requests(vhost->host);
+                               }
+
                                return;
                        } else {
                                ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_INIT);
@@ -3871,24 +4012,6 @@ static void ibmvfc_do_work(struct ibmvfc_host *vhost)
                if (!ibmvfc_dev_init_to_do(vhost))
                        ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_TGT_DEL_FAILED);
                break;
-       case IBMVFC_HOST_ACTION_TGT_ADD:
-               list_for_each_entry(tgt, &vhost->targets, queue) {
-                       if (tgt->action == IBMVFC_TGT_ACTION_ADD_RPORT) {
-                               spin_unlock_irqrestore(vhost->host->host_lock, flags);
-                               ibmvfc_tgt_add_rport(tgt);
-                               return;
-                       }
-               }
-
-               if (vhost->reinit && !ibmvfc_set_host_state(vhost, IBMVFC_INITIALIZING)) {
-                       vhost->reinit = 0;
-                       scsi_block_requests(vhost->host);
-                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_QUERY);
-               } else {
-                       ibmvfc_set_host_action(vhost, IBMVFC_HOST_ACTION_NONE);
-                       wake_up(&vhost->init_wait_q);
-               }
-               break;
        default:
                break;
        };
@@ -4117,6 +4240,56 @@ nomem:
        return -ENOMEM;
 }
 
+/**
+ * ibmvfc_rport_add_thread - Worker thread for rport adds
+ * @work:      work struct
+ *
+ **/
+static void ibmvfc_rport_add_thread(struct work_struct *work)
+{
+       struct ibmvfc_host *vhost = container_of(work, struct ibmvfc_host,
+                                                rport_add_work_q);
+       struct ibmvfc_target *tgt;
+       struct fc_rport *rport;
+       unsigned long flags;
+       int did_work;
+
+       ENTER;
+       spin_lock_irqsave(vhost->host->host_lock, flags);
+       do {
+               did_work = 0;
+               if (vhost->state != IBMVFC_ACTIVE)
+                       break;
+
+               list_for_each_entry(tgt, &vhost->targets, queue) {
+                       if (tgt->add_rport) {
+                               did_work = 1;
+                               tgt->add_rport = 0;
+                               kref_get(&tgt->kref);
+                               rport = tgt->rport;
+                               if (!rport) {
+                                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                                       ibmvfc_tgt_add_rport(tgt);
+                               } else if (get_device(&rport->dev)) {
+                                       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+                                       tgt_dbg(tgt, "Setting rport roles\n");
+                                       fc_remote_port_rolechg(rport, tgt->ids.roles);
+                                       put_device(&rport->dev);
+                               }
+
+                               kref_put(&tgt->kref, ibmvfc_release_tgt);
+                               spin_lock_irqsave(vhost->host->host_lock, flags);
+                               break;
+                       }
+               }
+       } while(did_work);
+
+       if (vhost->state == IBMVFC_ACTIVE)
+               vhost->scan_complete = 1;
+       spin_unlock_irqrestore(vhost->host->host_lock, flags);
+       LEAVE;
+}
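
The worker exists because fc_remote_port_add() and fc_remote_port_rolechg() can block and so must not be called under the host lock. The loop therefore claims one work item, drops the lock for the transport call, retakes it, and restarts the scan from the head, since the target list may have changed in between. A compilable miniature of that pattern, with a pthread mutex standing in for the spinlock:

#include <stdio.h>
#include <pthread.h>

#define NTGT 4

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int add_pending[NTGT] = { 1, 0, 1, 1 };

static void blocking_transport_call(int i)	/* think fc_remote_port_add() */
{
	printf("adding rport for target %d\n", i);
}

int main(void)
{
	int did_work;

	pthread_mutex_lock(&lock);
	do {
		did_work = 0;
		for (int i = 0; i < NTGT; i++) {
			if (!add_pending[i])
				continue;
			add_pending[i] = 0;		/* claim the work item */
			pthread_mutex_unlock(&lock);	/* can't block under the lock */
			blocking_transport_call(i);
			pthread_mutex_lock(&lock);
			did_work = 1;
			break;				/* list may have changed: rescan */
		}
	} while (did_work);
	pthread_mutex_unlock(&lock);
	return 0;
}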
+
 /**
  * ibmvfc_probe - Adapter hot plug add entry point
  * @vdev:      vio device struct
@@ -4160,6 +4333,7 @@ static int ibmvfc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        strcpy(vhost->partition_name, "UNKNOWN");
        init_waitqueue_head(&vhost->work_wait_q);
        init_waitqueue_head(&vhost->init_wait_q);
+       INIT_WORK(&vhost->rport_add_work_q, ibmvfc_rport_add_thread);
 
        if ((rc = ibmvfc_alloc_mem(vhost)))
                goto free_scsi_host;
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.h b/drivers/scsi/ibmvscsi/ibmvfc.h
index ca1dcf7a7568b95ab1f4d3608a5fd710a449798c..c2668d7d67f5308cdfc33819f0333e719f842947 100644
@@ -29,8 +29,8 @@
 #include "viosrp.h"
 
 #define IBMVFC_NAME    "ibmvfc"
-#define IBMVFC_DRIVER_VERSION          "1.0.5"
-#define IBMVFC_DRIVER_DATE             "(March 19, 2009)"
+#define IBMVFC_DRIVER_VERSION          "1.0.6"
+#define IBMVFC_DRIVER_DATE             "(May 28, 2009)"
 
 #define IBMVFC_DEFAULT_TIMEOUT 60
 #define IBMVFC_ADISC_CANCEL_TIMEOUT    45
  * Ensure we have resources for ERP and initialization:
  * 1 for ERP
  * 1 for initialization
+ * 1 for NPIV Logout
  * 2 for each discovery thread
  */
-#define IBMVFC_NUM_INTERNAL_REQ        (1 + 1 + (disc_threads * 2))
+#define IBMVFC_NUM_INTERNAL_REQ        (1 + 1 + 1 + (disc_threads * 2))
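
The extra 1 reserves an event for the NPIV Logout this patch adds, on top of the ERP and initialization reservations. For example, with disc_threads = 4 the pool sets aside 1 + 1 + 1 + 8 = 11 internal requests:

#include <stdio.h>

/* 1 ERP + 1 initialization + 1 NPIV logout + 2 per discovery thread */
#define NUM_INTERNAL_REQ(disc_threads)	(1 + 1 + 1 + ((disc_threads) * 2))

int main(void)
{
	printf("%d\n", NUM_INTERNAL_REQ(4));	/* prints: 11 */
	return 0;
}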
 
 #define IBMVFC_MAD_SUCCESS             0x00
 #define IBMVFC_MAD_NOT_SUPPORTED       0xF1
@@ -127,6 +128,7 @@ enum ibmvfc_mad_types {
        IBMVFC_IMPLICIT_LOGOUT  = 0x0040,
        IBMVFC_PASSTHRU         = 0x0200,
        IBMVFC_TMF_MAD          = 0x0100,
+       IBMVFC_NPIV_LOGOUT      = 0x0800,
 };
 
 struct ibmvfc_mad_common {
@@ -143,6 +145,10 @@ struct ibmvfc_npiv_login_mad {
        struct srp_direct_buf buffer;
 }__attribute__((packed, aligned (8)));
 
+struct ibmvfc_npiv_logout_mad {
+       struct ibmvfc_mad_common common;
+}__attribute__((packed, aligned (8)));
+
 #define IBMVFC_MAX_NAME 256
 
 struct ibmvfc_npiv_login {
@@ -201,7 +207,8 @@ struct ibmvfc_npiv_login_resp {
 #define IBMVFC_NATIVE_FC               0x01
 #define IBMVFC_CAN_FLUSH_ON_HALT       0x08
        u32 reserved;
-       u64 capabilites;
+       u64 capabilities;
+#define IBMVFC_CAN_FLUSH_ON_HALT       0x08
        u32 max_cmds;
        u32 scsi_id_sz;
        u64 max_dma_len;
@@ -541,9 +548,17 @@ struct ibmvfc_crq_queue {
        dma_addr_t msg_token;
 };
 
+enum ibmvfc_ae_link_state {
+       IBMVFC_AE_LS_LINK_UP            = 0x01,
+       IBMVFC_AE_LS_LINK_BOUNCED       = 0x02,
+       IBMVFC_AE_LS_LINK_DOWN          = 0x04,
+       IBMVFC_AE_LS_LINK_DEAD          = 0x08,
+};
+
 struct ibmvfc_async_crq {
        volatile u8 valid;
-       u8 pad[3];
+       u8 link_state;
+       u8 pad[2];
        u32 pad2;
        volatile u64 event;
        volatile u64 scsi_id;
@@ -561,6 +576,7 @@ struct ibmvfc_async_crq_queue {
 union ibmvfc_iu {
        struct ibmvfc_mad_common mad_common;
        struct ibmvfc_npiv_login_mad npiv_login;
+       struct ibmvfc_npiv_logout_mad npiv_logout;
        struct ibmvfc_discover_targets discover_targets;
        struct ibmvfc_port_login plogi;
        struct ibmvfc_process_login prli;
@@ -575,7 +591,6 @@ enum ibmvfc_target_action {
        IBMVFC_TGT_ACTION_NONE = 0,
        IBMVFC_TGT_ACTION_INIT,
        IBMVFC_TGT_ACTION_INIT_WAIT,
-       IBMVFC_TGT_ACTION_ADD_RPORT,
        IBMVFC_TGT_ACTION_DEL_RPORT,
 };
 
@@ -588,6 +603,7 @@ struct ibmvfc_target {
        int target_id;
        enum ibmvfc_target_action action;
        int need_login;
+       int add_rport;
        int init_retries;
        u32 cancel_key;
        struct ibmvfc_service_parms service_parms;
@@ -627,6 +643,8 @@ struct ibmvfc_event_pool {
 
 enum ibmvfc_host_action {
        IBMVFC_HOST_ACTION_NONE = 0,
+       IBMVFC_HOST_ACTION_LOGO,
+       IBMVFC_HOST_ACTION_LOGO_WAIT,
        IBMVFC_HOST_ACTION_INIT,
        IBMVFC_HOST_ACTION_INIT_WAIT,
        IBMVFC_HOST_ACTION_QUERY,
@@ -635,7 +653,6 @@ enum ibmvfc_host_action {
        IBMVFC_HOST_ACTION_ALLOC_TGTS,
        IBMVFC_HOST_ACTION_TGT_INIT,
        IBMVFC_HOST_ACTION_TGT_DEL_FAILED,
-       IBMVFC_HOST_ACTION_TGT_ADD,
 };
 
 enum ibmvfc_host_state {
@@ -682,6 +699,8 @@ struct ibmvfc_host {
        int client_migrated;
        int reinit;
        int delay_init;
+       int scan_complete;
+       int logged_in;
        int events_to_log;
 #define IBMVFC_AE_LINKUP       0x0001
 #define IBMVFC_AE_LINKDOWN     0x0002
@@ -692,6 +711,7 @@ struct ibmvfc_host {
        void (*job_step) (struct ibmvfc_host *);
        struct task_struct *work_thread;
        struct tasklet_struct tasklet;
+       struct work_struct rport_add_work_q;
        wait_queue_head_t init_wait_q;
        wait_queue_head_t work_wait_q;
 };
@@ -707,6 +727,12 @@ struct ibmvfc_host {
 #define tgt_err(t, fmt, ...)           \
        dev_err((t)->vhost->dev, "%llX: " fmt, (t)->scsi_id, ##__VA_ARGS__)
 
+#define tgt_log(t, level, fmt, ...) \
+       do { \
+               if ((t)->vhost->log_level >= level) \
+                       tgt_err(t, fmt, ##__VA_ARGS__); \
+       } while (0)
+
 #define ibmvfc_dbg(vhost, ...) \
        DBG_CMD(dev_info((vhost)->dev, ##__VA_ARGS__))
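
tgt_log above wraps its body in do { } while (0) so the multi-statement macro acts as a single statement and pairs correctly with a following else; ##__VA_ARGS__ is the GNU extension the kernel relies on to swallow the comma when no arguments are given. A compilable miniature:

#include <stdio.h>

static int log_level = 2;

#define tgt_log(level, fmt, ...) \
	do { \
		if (log_level >= (level)) \
			fprintf(stderr, fmt, ##__VA_ARGS__); \
	} while (0)

int main(void)
{
	int ok = 0;

	if (!ok)
		tgt_log(2, "Process Login failed: rc=0x%02X\n", 4);
	else
		printf("ok\n");	/* if/else pairs correctly thanks to do/while */
	return 0;
}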
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index c9aa7611e40824142534d21aa46fc4461936ba33..11d2602ae88ecd2ffd4c2ab8b759538dac8a02c8 100644
@@ -70,6 +70,7 @@
 #include <linux/moduleparam.h>
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
+#include <linux/of.h>
 #include <asm/firmware.h>
 #include <asm/vio.h>
 #include <asm/firmware.h>
  */
 static int max_id = 64;
 static int max_channel = 3;
-static int init_timeout = 5;
+static int init_timeout = 300;
+static int login_timeout = 60;
+static int info_timeout = 30;
+static int abort_timeout = 60;
+static int reset_timeout = 60;
 static int max_requests = IBMVSCSI_MAX_REQUESTS_DEFAULT;
 static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
+static int fast_fail = 1;
+static int client_reserve = 1;
 
 static struct scsi_transport_template *ibmvscsi_transport_template;
 
@@ -110,6 +117,10 @@ module_param_named(init_timeout, init_timeout, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(init_timeout, "Initialization timeout in seconds");
 module_param_named(max_requests, max_requests, int, S_IRUGO);
 MODULE_PARM_DESC(max_requests, "Maximum requests for this adapter");
+module_param_named(fast_fail, fast_fail, int, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(fast_fail, "Enable fast fail. [Default=1]");
+module_param_named(client_reserve, client_reserve, int, S_IRUGO);
+MODULE_PARM_DESC(client_reserve, "Attempt client managed reserve/release");
 
 /* ------------------------------------------------------------
  * Routines for the event pool and event structs
@@ -781,105 +792,53 @@ static int ibmvscsi_queuecommand(struct scsi_cmnd *cmnd,
 /* ------------------------------------------------------------
  * Routines for driver initialization
  */
+
 /**
- * adapter_info_rsp: - Handle response to MAD adapter info request
- * @evt_struct:        srp_event_struct with the response
+ * map_persist_bufs: - Pre-map persistent data for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
  *
- * Used as a "done" callback by when sending adapter_info. Gets called
- * by ibmvscsi_handle_crq()
-*/
-static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+ * Map the capabilities and adapter info DMA buffers to avoid runtime failures.
+ * Return 1 on error, 0 on success.
+ */
+static int map_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
-       dma_unmap_single(hostdata->dev,
-                        evt_struct->iu.mad.adapter_info.buffer,
-                        evt_struct->iu.mad.adapter_info.common.length,
-                        DMA_BIDIRECTIONAL);
 
-       if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
-               dev_err(hostdata->dev, "error %d getting adapter info\n",
-                       evt_struct->xfer_iu->mad.adapter_info.common.status);
-       } else {
-               dev_info(hostdata->dev, "host srp version: %s, "
-                        "host partition %s (%d), OS %d, max io %u\n",
-                        hostdata->madapter_info.srp_version,
-                        hostdata->madapter_info.partition_name,
-                        hostdata->madapter_info.partition_number,
-                        hostdata->madapter_info.os_type,
-                        hostdata->madapter_info.port_max_txu[0]);
-               
-               if (hostdata->madapter_info.port_max_txu[0]) 
-                       hostdata->host->max_sectors = 
-                               hostdata->madapter_info.port_max_txu[0] >> 9;
-               
-               if (hostdata->madapter_info.os_type == 3 &&
-                   strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
-                       dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
-                               hostdata->madapter_info.srp_version);
-                       dev_err(hostdata->dev, "limiting scatterlists to %d\n",
-                               MAX_INDIRECT_BUFS);
-                       hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
-               }
+       hostdata->caps_addr = dma_map_single(hostdata->dev, &hostdata->caps,
+                                            sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+
+       if (dma_mapping_error(hostdata->dev, hostdata->caps_addr)) {
+               dev_err(hostdata->dev, "Unable to map capabilities buffer!\n");
+               return 1;
        }
+
+       hostdata->adapter_info_addr = dma_map_single(hostdata->dev,
+                                                    &hostdata->madapter_info,
+                                                    sizeof(hostdata->madapter_info),
+                                                    DMA_BIDIRECTIONAL);
+       if (dma_mapping_error(hostdata->dev, hostdata->adapter_info_addr)) {
+               dev_err(hostdata->dev, "Unable to map adapter info buffer!\n");
+               dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                                sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
+               return 1;
+       }
+
+       return 0;
 }
 
 /**
- * send_mad_adapter_info: - Sends the mad adapter info request
- *      and stores the result so it can be retrieved with
- *      sysfs.  We COULD consider causing a failure if the
- *      returned SRP version doesn't match ours.
- * @hostdata:  ibmvscsi_host_data of host
- * 
- * Returns zero if successful.
-*/
-static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+ * unmap_persist_bufs: - Unmap persistent data needed for adapter logins
+ * @hostdata:   ibmvscsi_host_data of host
+ *
+ * Unmap the capabilities and adapter info DMA buffers
+ */
+static void unmap_persist_bufs(struct ibmvscsi_host_data *hostdata)
 {
-       struct viosrp_adapter_info *req;
-       struct srp_event_struct *evt_struct;
-       unsigned long flags;
-       dma_addr_t addr;
-
-       evt_struct = get_event_struct(&hostdata->pool);
-       if (!evt_struct) {
-               dev_err(hostdata->dev,
-                       "couldn't allocate an event for ADAPTER_INFO_REQ!\n");
-               return;
-       }
-
-       init_event_struct(evt_struct,
-                         adapter_info_rsp,
-                         VIOSRP_MAD_FORMAT,
-                         init_timeout);
-       
-       req = &evt_struct->iu.mad.adapter_info;
-       memset(req, 0x00, sizeof(*req));
-       
-       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
-       req->common.length = sizeof(hostdata->madapter_info);
-       req->buffer = addr = dma_map_single(hostdata->dev,
-                                           &hostdata->madapter_info,
-                                           sizeof(hostdata->madapter_info),
-                                           DMA_BIDIRECTIONAL);
+       dma_unmap_single(hostdata->dev, hostdata->caps_addr,
+                        sizeof(hostdata->caps), DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(hostdata->dev, req->buffer)) {
-               if (!firmware_has_feature(FW_FEATURE_CMO))
-                       dev_err(hostdata->dev,
-                               "Unable to map request_buffer for "
-                               "adapter_info!\n");
-               free_event_struct(&hostdata->pool, evt_struct);
-               return;
-       }
-       
-       spin_lock_irqsave(hostdata->host->host_lock, flags);
-       if (ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2)) {
-               dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
-               dma_unmap_single(hostdata->dev,
-                                addr,
-                                sizeof(hostdata->madapter_info),
-                                DMA_BIDIRECTIONAL);
-       }
-       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
-};
+       dma_unmap_single(hostdata->dev, hostdata->adapter_info_addr,
+                        sizeof(hostdata->madapter_info), DMA_BIDIRECTIONAL);
+}
 
 /**
  * login_rsp: - Handle response to SRP login request
@@ -909,9 +868,7 @@ static void login_rsp(struct srp_event_struct *evt_struct)
        }
 
        dev_info(hostdata->dev, "SRP_LOGIN succeeded\n");
-
-       if (evt_struct->xfer_iu->srp.login_rsp.req_lim_delta < 0)
-               dev_err(hostdata->dev, "Invalid request_limit.\n");
+       hostdata->client_migrated = 0;
 
        /* Now we know what the real request-limit is.
         * This value is set rather than added to request_limit because
@@ -922,15 +879,12 @@ static void login_rsp(struct srp_event_struct *evt_struct)
 
        /* If we had any pending I/Os, kick them */
        scsi_unblock_requests(hostdata->host);
-
-       send_mad_adapter_info(hostdata);
-       return;
 }
 
 /**
  * send_srp_login: - Sends the srp login
  * @hostdata:  ibmvscsi_host_data of host
- * 
+ *
  * Returns zero if successful.
 */
 static int send_srp_login(struct ibmvscsi_host_data *hostdata)
@@ -939,22 +893,17 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
        unsigned long flags;
        struct srp_login_req *login;
        struct srp_event_struct *evt_struct = get_event_struct(&hostdata->pool);
-       if (!evt_struct) {
-               dev_err(hostdata->dev, "couldn't allocate an event for login req!\n");
-               return FAILED;
-       }
 
-       init_event_struct(evt_struct,
-                         login_rsp,
-                         VIOSRP_SRP_FORMAT,
-                         init_timeout);
+       BUG_ON(!evt_struct);
+       init_event_struct(evt_struct, login_rsp,
+                         VIOSRP_SRP_FORMAT, login_timeout);
 
        login = &evt_struct->iu.srp.login_req;
-       memset(login, 0x00, sizeof(struct srp_login_req));
+       memset(login, 0, sizeof(*login));
        login->opcode = SRP_LOGIN_REQ;
        login->req_it_iu_len = sizeof(union srp_iu);
        login->req_buf_fmt = SRP_BUF_FORMAT_DIRECT | SRP_BUF_FORMAT_INDIRECT;
-       
+
        spin_lock_irqsave(hostdata->host->host_lock, flags);
        /* Start out with a request limit of 0, since this is negotiated in
         * the login request we are just sending and login requests always
@@ -962,12 +911,240 @@ static int send_srp_login(struct ibmvscsi_host_data *hostdata)
         */
        atomic_set(&hostdata->request_limit, 0);
 
-       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, login_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        dev_info(hostdata->dev, "sent SRP login\n");
        return rc;
 };
 
+/**
+ * capabilities_rsp: - Handle response to MAD adapter capabilities request
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending the capabilities request.
+ */
+static void capabilities_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       if (evt_struct->xfer_iu->mad.capabilities.common.status) {
+               dev_err(hostdata->dev, "error 0x%X getting capabilities info\n",
+                       evt_struct->xfer_iu->mad.capabilities.common.status);
+       } else {
+               if (hostdata->caps.migration.common.server_support != SERVER_SUPPORTS_CAP)
+                       dev_info(hostdata->dev, "Partition migration not supported\n");
+
+               if (client_reserve) {
+                       if (hostdata->caps.reserve.common.server_support ==
+                           SERVER_SUPPORTS_CAP)
+                               dev_info(hostdata->dev, "Client reserve enabled\n");
+                       else
+                               dev_info(hostdata->dev, "Client reserve not supported\n");
+               }
+       }
+
+       send_srp_login(hostdata);
+}
+
+/**
+ * send_mad_capabilities: - Sends the mad capabilities request
+ *      and stores the result in hostdata->caps.
+ * @hostdata:  ibmvscsi_host_data of host
+ */
+static void send_mad_capabilities(struct ibmvscsi_host_data *hostdata)
+{
+       struct viosrp_capabilities *req;
+       struct srp_event_struct *evt_struct;
+       unsigned long flags;
+       struct device_node *of_node = hostdata->dev->archdata.of_node;
+       const char *location;
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct, capabilities_rsp,
+                         VIOSRP_MAD_FORMAT, info_timeout);
+
+       req = &evt_struct->iu.mad.capabilities;
+       memset(req, 0, sizeof(*req));
+
+       hostdata->caps.flags = CAP_LIST_SUPPORTED;
+       if (hostdata->client_migrated)
+               hostdata->caps.flags |= CLIENT_MIGRATED;
+
+       strncpy(hostdata->caps.name, dev_name(&hostdata->host->shost_gendev),
+               sizeof(hostdata->caps.name));
+       hostdata->caps.name[sizeof(hostdata->caps.name) - 1] = '\0';
+
+       location = of_get_property(of_node, "ibm,loc-code", NULL);
+       location = location ? location : dev_name(hostdata->dev);
+       strncpy(hostdata->caps.loc, location, sizeof(hostdata->caps.loc));
+       hostdata->caps.loc[sizeof(hostdata->caps.loc) - 1] = '\0';
+
+       req->common.type = VIOSRP_CAPABILITIES_TYPE;
+       req->buffer = hostdata->caps_addr;
+
+       hostdata->caps.migration.common.cap_type = MIGRATION_CAPABILITIES;
+       hostdata->caps.migration.common.length = sizeof(hostdata->caps.migration);
+       hostdata->caps.migration.common.server_support = SERVER_SUPPORTS_CAP;
+       hostdata->caps.migration.ecl = 1;
+
+       if (client_reserve) {
+               hostdata->caps.reserve.common.cap_type = RESERVATION_CAPABILITIES;
+               hostdata->caps.reserve.common.length = sizeof(hostdata->caps.reserve);
+               hostdata->caps.reserve.common.server_support = SERVER_SUPPORTS_CAP;
+               hostdata->caps.reserve.type = CLIENT_RESERVE_SCSI_2;
+               req->common.length = sizeof(hostdata->caps);
+       } else
+               req->common.length = sizeof(hostdata->caps) - sizeof(hostdata->caps.reserve);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+               dev_err(hostdata->dev, "couldn't send CAPABILITIES_REQ!\n");
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+};
+
+/**
+ * fast_fail_rsp: - Handle response to MAD enable fast fail
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending the enable fast fail request.
+ * Gets called by ibmvscsi_handle_crq()
+ */
+static void fast_fail_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+       u8 status = evt_struct->xfer_iu->mad.fast_fail.common.status;
+
+       if (status == VIOSRP_MAD_NOT_SUPPORTED)
+               dev_err(hostdata->dev, "fast_fail not supported in server\n");
+       else if (status == VIOSRP_MAD_FAILED)
+               dev_err(hostdata->dev, "fast_fail request failed\n");
+       else if (status != VIOSRP_MAD_SUCCESS)
+               dev_err(hostdata->dev, "error 0x%X enabling fast_fail\n", status);
+
+       send_mad_capabilities(hostdata);
+}
+
+/**
+ * enable_fast_fail - Send the MAD enable fast fail request
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ * Returns zero if successful.
+ */
+static int enable_fast_fail(struct ibmvscsi_host_data *hostdata)
+{
+       int rc;
+       unsigned long flags;
+       struct viosrp_fast_fail *fast_fail_mad;
+       struct srp_event_struct *evt_struct;
+
+       if (!fast_fail) {
+               send_mad_capabilities(hostdata);
+               return 0;
+       }
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct, fast_fail_rsp, VIOSRP_MAD_FORMAT, info_timeout);
+
+       fast_fail_mad = &evt_struct->iu.mad.fast_fail;
+       memset(fast_fail_mad, 0, sizeof(*fast_fail_mad));
+       fast_fail_mad->common.type = VIOSRP_ENABLE_FAST_FAIL;
+       fast_fail_mad->common.length = sizeof(*fast_fail_mad);
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+       return rc;
+}
+
+/**
+ * adapter_info_rsp: - Handle response to MAD adapter info request
+ * @evt_struct:        srp_event_struct with the response
+ *
+ * Used as a "done" callback when sending adapter_info. Gets called
+ * by ibmvscsi_handle_crq()
+ */
+static void adapter_info_rsp(struct srp_event_struct *evt_struct)
+{
+       struct ibmvscsi_host_data *hostdata = evt_struct->hostdata;
+
+       if (evt_struct->xfer_iu->mad.adapter_info.common.status) {
+               dev_err(hostdata->dev, "error %d getting adapter info\n",
+                       evt_struct->xfer_iu->mad.adapter_info.common.status);
+       } else {
+               dev_info(hostdata->dev, "host srp version: %s, "
+                        "host partition %s (%d), OS %d, max io %u\n",
+                        hostdata->madapter_info.srp_version,
+                        hostdata->madapter_info.partition_name,
+                        hostdata->madapter_info.partition_number,
+                        hostdata->madapter_info.os_type,
+                        hostdata->madapter_info.port_max_txu[0]);
+               
+               if (hostdata->madapter_info.port_max_txu[0]) 
+                       hostdata->host->max_sectors = 
+                               hostdata->madapter_info.port_max_txu[0] >> 9;
+               
+               if (hostdata->madapter_info.os_type == 3 &&
+                   strcmp(hostdata->madapter_info.srp_version, "1.6a") <= 0) {
+                       dev_err(hostdata->dev, "host (Ver. %s) doesn't support large transfers\n",
+                               hostdata->madapter_info.srp_version);
+                       dev_err(hostdata->dev, "limiting scatterlists to %d\n",
+                               MAX_INDIRECT_BUFS);
+                       hostdata->host->sg_tablesize = MAX_INDIRECT_BUFS;
+               }
+       }
+
+       enable_fast_fail(hostdata);
+}
+
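
Note: port_max_txu is reported in bytes while Scsi_Host.max_sectors counts
512-byte sectors, hence the shift by 9 above. A minimal arithmetic check
(the 1 MiB value is an assumption, not a value from the driver):

/* Illustration only: converting a byte-sized max I/O into 512-byte
 * sectors, as the >> 9 in adapter_info_rsp() does. */
#include <stdio.h>

int main(void)
{
        unsigned int port_max_txu = 1 << 20;            /* 1 MiB, example */
        unsigned int max_sectors = port_max_txu >> 9;   /* bytes / 512 */

        printf("max_sectors = %u\n", max_sectors);      /* prints 2048 */
        return 0;
}
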
+/**
+ * send_mad_adapter_info: - Sends the mad adapter info request
+ *      and stores the result so it can be retrieved with
+ *      sysfs.  We COULD consider causing a failure if the
+ *      returned SRP version doesn't match ours.
+ * @hostdata:  ibmvscsi_host_data of host
+ */
+static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
+{
+       struct viosrp_adapter_info *req;
+       struct srp_event_struct *evt_struct;
+       unsigned long flags;
+
+       evt_struct = get_event_struct(&hostdata->pool);
+       BUG_ON(!evt_struct);
+
+       init_event_struct(evt_struct,
+                         adapter_info_rsp,
+                         VIOSRP_MAD_FORMAT,
+                         info_timeout);
+       
+       req = &evt_struct->iu.mad.adapter_info;
+       memset(req, 0x00, sizeof(*req));
+       
+       req->common.type = VIOSRP_ADAPTER_INFO_TYPE;
+       req->common.length = sizeof(hostdata->madapter_info);
+       req->buffer = hostdata->adapter_info_addr;
+
+       spin_lock_irqsave(hostdata->host->host_lock, flags);
+       if (ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2))
+               dev_err(hostdata->dev, "couldn't send ADAPTER_INFO_REQ!\n");
+       spin_unlock_irqrestore(hostdata->host->host_lock, flags);
+}
+
+/**
+ * init_adapter: Start virtual adapter initialization sequence
+ * @hostdata:  ibmvscsi_host_data of host
+ *
+ * Kicks off the MAD negotiation: adapter info, then fast fail, then
+ * capabilities, with each response handler sending the next request
+ * until send_srp_login() is reached.
+ */
+static void init_adapter(struct ibmvscsi_host_data *hostdata)
+{
+       send_mad_adapter_info(hostdata);
+}
+
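
Note: init_adapter() only issues the first MAD request; each response
handler then sends the next one, so the whole negotiation runs
asynchronously off the CRQ. A toy model of the ordering (userspace,
illustrative only; the real handlers run from ibmvscsi_handle_crq()):

/* Toy model of the callback chain this patch introduces:
 * adapter info -> enable fast fail -> capabilities -> SRP login. */
#include <stdio.h>

static void send_srp_login_step(void)  { puts("SRP login"); }
static void capabilities_step(void)    { puts("capabilities"); send_srp_login_step(); }
static void fast_fail_step(void)       { puts("enable fast fail"); capabilities_step(); }
static void adapter_info_step(void)    { puts("adapter info"); fast_fail_step(); }

int main(void)
{
        adapter_info_step();            /* what init_adapter() starts */
        return 0;
}
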
 /**
  * sync_completion: Signal that a synchronous command has completed
  * Note that after returning from this call, the evt_struct is freed.
@@ -1029,7 +1206,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
                init_event_struct(evt,
                                  sync_completion,
                                  VIOSRP_SRP_FORMAT,
-                                 init_timeout);
+                                 abort_timeout);
 
                tsk_mgmt = &evt->iu.srp.tsk_mgmt;
        
@@ -1043,7 +1220,7 @@ static int ibmvscsi_eh_abort_handler(struct scsi_cmnd *cmd)
                evt->sync_srp = &srp_rsp;
 
                init_completion(&evt->comp);
-               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, abort_timeout * 2);
 
                if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
                        break;
@@ -1152,7 +1329,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
                init_event_struct(evt,
                                  sync_completion,
                                  VIOSRP_SRP_FORMAT,
-                                 init_timeout);
+                                 reset_timeout);
 
                tsk_mgmt = &evt->iu.srp.tsk_mgmt;
 
@@ -1165,7 +1342,7 @@ static int ibmvscsi_eh_device_reset_handler(struct scsi_cmnd *cmd)
                evt->sync_srp = &srp_rsp;
 
                init_completion(&evt->comp);
-               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, init_timeout * 2);
+               rsp_rc = ibmvscsi_send_srp_event(evt, hostdata, reset_timeout * 2);
 
                if (rsp_rc != SCSI_MLQUEUE_HOST_BUSY)
                        break;
@@ -1281,7 +1458,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                        if ((rc = ibmvscsi_ops->send_crq(hostdata,
                                                         0xC002000000000000LL, 0)) == 0) {
                                /* Now login */
-                               send_srp_login(hostdata);
+                               init_adapter(hostdata);
                        } else {
                                dev_err(hostdata->dev, "Unable to send init rsp. rc=%ld\n", rc);
                        }
@@ -1291,7 +1468,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                        dev_info(hostdata->dev, "partner initialization complete\n");
 
                        /* Now login */
-                       send_srp_login(hostdata);
+                       init_adapter(hostdata);
                        break;
                default:
                        dev_err(hostdata->dev, "unknown crq message type: %d\n", crq->format);
@@ -1303,6 +1480,7 @@ void ibmvscsi_handle_crq(struct viosrp_crq *crq,
                if (crq->format == 0x06) {
                        /* We need to re-setup the interpartition connection */
                        dev_info(hostdata->dev, "Re-enabling adapter!\n");
+                       hostdata->client_migrated = 1;
                        purge_requests(hostdata, DID_REQUEUE);
                        if ((ibmvscsi_ops->reenable_crq_queue(&hostdata->queue,
                                                              hostdata)) ||
@@ -1397,7 +1575,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
        init_event_struct(evt_struct,
                          sync_completion,
                          VIOSRP_MAD_FORMAT,
-                         init_timeout);
+                         info_timeout);
 
        host_config = &evt_struct->iu.mad.host_config;
 
@@ -1419,7 +1597,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
 
        init_completion(&evt_struct->comp);
        spin_lock_irqsave(hostdata->host->host_lock, flags);
-       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, init_timeout * 2);
+       rc = ibmvscsi_send_srp_event(evt_struct, hostdata, info_timeout * 2);
        spin_unlock_irqrestore(hostdata->host->host_lock, flags);
        if (rc == 0)
                wait_for_completion(&evt_struct->comp);
@@ -1444,7 +1622,7 @@ static int ibmvscsi_slave_configure(struct scsi_device *sdev)
        spin_lock_irqsave(shost->host_lock, lock_flags);
        if (sdev->type == TYPE_DISK) {
                sdev->allow_restart = 1;
-               blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
+               blk_queue_rq_timeout(sdev->request_queue, 120 * HZ);
        }
        scsi_adjust_queue_depth(sdev, 0, shost->cmd_per_lun);
        spin_unlock_irqrestore(shost->host_lock, lock_flags);
@@ -1471,6 +1649,46 @@ static int ibmvscsi_change_queue_depth(struct scsi_device *sdev, int qdepth)
 /* ------------------------------------------------------------
  * sysfs attributes
  */
+static ssize_t show_host_vhost_loc(struct device *dev,
+                                  struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+       int len;
+
+       len = snprintf(buf, sizeof(hostdata->caps.loc), "%s\n",
+                      hostdata->caps.loc);
+       return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_loc = {
+       .attr = {
+                .name = "vhost_loc",
+                .mode = S_IRUGO,
+                },
+       .show = show_host_vhost_loc,
+};
+
+static ssize_t show_host_vhost_name(struct device *dev,
+                                   struct device_attribute *attr, char *buf)
+{
+       struct Scsi_Host *shost = class_to_shost(dev);
+       struct ibmvscsi_host_data *hostdata = shost_priv(shost);
+       int len;
+
+       len = snprintf(buf, sizeof(hostdata->caps.name), "%s\n",
+                      hostdata->caps.name);
+       return len;
+}
+
+static struct device_attribute ibmvscsi_host_vhost_name = {
+       .attr = {
+                .name = "vhost_name",
+                .mode = S_IRUGO,
+                },
+       .show = show_host_vhost_name,
+};
+
 static ssize_t show_host_srp_version(struct device *dev,
                                     struct device_attribute *attr, char *buf)
 {
@@ -1594,6 +1812,8 @@ static struct device_attribute ibmvscsi_host_config = {
 };
 
 static struct device_attribute *ibmvscsi_attrs[] = {
+       &ibmvscsi_host_vhost_loc,
+       &ibmvscsi_host_vhost_name,
        &ibmvscsi_host_srp_version,
        &ibmvscsi_host_partition_name,
        &ibmvscsi_host_partition_number,
@@ -1674,6 +1894,11 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        atomic_set(&hostdata->request_limit, -1);
        hostdata->host->max_sectors = IBMVSCSI_MAX_SECTORS_DEFAULT;
 
+       if (map_persist_bufs(hostdata)) {
+               dev_err(&vdev->dev, "couldn't map persistent buffers\n");
+               goto persist_bufs_failed;
+       }
+
        rc = ibmvscsi_ops->init_crq_queue(&hostdata->queue, hostdata, max_events);
        if (rc != 0 && rc != H_RESOURCE) {
                dev_err(&vdev->dev, "couldn't initialize crq. rc=%d\n", rc);
@@ -1687,6 +1912,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
        host->max_lun = 8;
        host->max_id = max_id;
        host->max_channel = max_channel;
+       host->max_cmd_len = 16;
 
        if (scsi_add_host(hostdata->host, hostdata->dev))
                goto add_host_failed;
@@ -1733,6 +1959,8 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
       init_pool_failed:
        ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata, max_events);
       init_crq_failed:
+       unmap_persist_bufs(hostdata);
+      persist_bufs_failed:
        scsi_host_put(host);
       scsi_host_alloc_failed:
        return -1;
@@ -1741,6 +1969,7 @@ static int ibmvscsi_probe(struct vio_dev *vdev, const struct vio_device_id *id)
 static int ibmvscsi_remove(struct vio_dev *vdev)
 {
        struct ibmvscsi_host_data *hostdata = vdev->dev.driver_data;
+       unmap_persist_bufs(hostdata);
        release_event_pool(&hostdata->pool, hostdata);
        ibmvscsi_ops->release_crq_queue(&hostdata->queue, hostdata,
                                        max_events);
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 2d4339d5e16e4e3c9cfed8b7b4fe30b87cb84932..76425303def0c0265b89db3ea656132cf7b756c8 100644
@@ -90,6 +90,7 @@ struct event_pool {
 /* all driver data associated with a host adapter */
 struct ibmvscsi_host_data {
        atomic_t request_limit;
+       int client_migrated;
        struct device *dev;
        struct event_pool pool;
        struct crq_queue queue;
@@ -97,6 +98,9 @@ struct ibmvscsi_host_data {
        struct list_head sent;
        struct Scsi_Host *host;
        struct mad_adapter_info_data madapter_info;
+       struct capabilities caps;
+       dma_addr_t caps_addr;
+       dma_addr_t adapter_info_addr;
 };
 
 /* routines for managing a command/response queue */
diff --git a/drivers/scsi/ibmvscsi/viosrp.h b/drivers/scsi/ibmvscsi/viosrp.h
index 204604501ad8cc347a3c01bc27205fc2bf614982..2cd735d1d1962fe43eee7b65f85f1774bfd95fef 100644
@@ -37,6 +37,7 @@
 
 #define SRP_VERSION "16.a"
 #define SRP_MAX_IU_LEN 256
+#define SRP_MAX_LOC_LEN 32
 
 union srp_iu {
        struct srp_login_req login_req;
@@ -86,7 +87,37 @@ enum viosrp_mad_types {
        VIOSRP_EMPTY_IU_TYPE = 0x01,
        VIOSRP_ERROR_LOG_TYPE = 0x02,
        VIOSRP_ADAPTER_INFO_TYPE = 0x03,
-       VIOSRP_HOST_CONFIG_TYPE = 0x04
+       VIOSRP_HOST_CONFIG_TYPE = 0x04,
+       VIOSRP_CAPABILITIES_TYPE = 0x05,
+       VIOSRP_ENABLE_FAST_FAIL = 0x08,
+};
+
+enum viosrp_mad_status {
+       VIOSRP_MAD_SUCCESS = 0x00,
+       VIOSRP_MAD_NOT_SUPPORTED = 0xF1,
+       VIOSRP_MAD_FAILED = 0xF7,
+};
+
+enum viosrp_capability_type {
+       MIGRATION_CAPABILITIES = 0x01,
+       RESERVATION_CAPABILITIES = 0x02,
+};
+
+enum viosrp_capability_support {
+       SERVER_DOES_NOT_SUPPORTS_CAP = 0x0,
+       SERVER_SUPPORTS_CAP = 0x01,
+       SERVER_CAP_DATA = 0x02,
+};
+
+enum viosrp_reserve_type {
+       CLIENT_RESERVE_SCSI_2 = 0x01,
+};
+
+enum viosrp_capability_flag {
+       CLIENT_MIGRATED = 0x01,
+       CLIENT_RECONNECT = 0x02,
+       CAP_LIST_SUPPORTED = 0x04,
+       CAP_LIST_DATA = 0x08,
 };
 
 /* 
@@ -127,11 +158,46 @@ struct viosrp_host_config {
        u64 buffer;
 };
 
+struct viosrp_fast_fail {
+       struct mad_common common;
+};
+
+struct viosrp_capabilities {
+       struct mad_common common;
+       u64 buffer;
+};
+
+struct mad_capability_common {
+       u32 cap_type;
+       u16 length;
+       u16 server_support;
+};
+
+struct mad_reserve_cap {
+       struct mad_capability_common common;
+       u32 type;
+};
+
+struct mad_migration_cap {
+       struct mad_capability_common common;
+       u32 ecl;
+};
+
+struct capabilities {
+       u32 flags;
+       char name[SRP_MAX_LOC_LEN];
+       char loc[SRP_MAX_LOC_LEN];
+       struct mad_migration_cap migration;
+       struct mad_reserve_cap reserve;
+};
+
 union mad_iu {
        struct viosrp_empty_iu empty_iu;
        struct viosrp_error_log error_log;
        struct viosrp_adapter_info adapter_info;
        struct viosrp_host_config host_config;
+       struct viosrp_fast_fail fast_fail;
+       struct viosrp_capabilities capabilities;
 };
 
 union viosrp_iu {
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index dd689ded8609e0709cbb0980d7356359fc6a0524..0f8bc772b1124d910ffd96bf455736152dffcd6e 100644
@@ -7003,6 +7003,7 @@ static void ipr_pci_perm_failure(struct pci_dev *pdev)
                ioa_cfg->sdt_state = ABORT_DUMP;
        ioa_cfg->reset_retries = IPR_NUM_RESET_RELOAD_RETRIES;
        ioa_cfg->in_ioa_bringdown = 1;
+       ioa_cfg->allow_cmds = 0;
        ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
        spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
 }
@@ -7688,7 +7689,7 @@ static void __ipr_remove(struct pci_dev *pdev)
  * Return value:
  *     none
  **/
-static void ipr_remove(struct pci_dev *pdev)
+static void __devexit ipr_remove(struct pci_dev *pdev)
 {
        struct ipr_ioa_cfg *ioa_cfg = pci_get_drvdata(pdev);
 
@@ -7864,7 +7865,7 @@ static struct pci_driver ipr_driver = {
        .name = IPR_NAME,
        .id_table = ipr_pci_table,
        .probe = ipr_probe,
-       .remove = ipr_remove,
+       .remove = __devexit_p(ipr_remove),
        .shutdown = ipr_shutdown,
        .err_handler = &ipr_err_handler,
 };
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 992af05aacf154f8ee4cbb5ea663fbb22611f5e6..7af9bceb8aa9efa467c3ca58ed4da1694ec7d79c 100644
@@ -1159,6 +1159,10 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
                atomic_inc(&mp->stats.xid_not_found);
                goto out;
        }
+       if (ep->esb_stat & ESB_ST_COMPLETE) {
+               atomic_inc(&mp->stats.xid_not_found);
+               goto out;
+       }
        if (ep->rxid == FC_XID_UNKNOWN)
                ep->rxid = ntohs(fh->fh_rx_id);
        if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index 521f996f9b131dd96c654530cfd73b06a961560c..ad8b747837b08c8922f56d4690f091d304e2321e 100644
@@ -1896,7 +1896,7 @@ static void fc_io_compl(struct fc_fcp_pkt *fsp)
                sc_cmd->result = (DID_ERROR << 16) | fsp->cdb_status;
                break;
        case FC_CMD_ABORTED:
-               sc_cmd->result = (DID_ABORT << 16) | fsp->io_status;
+               sc_cmd->result = (DID_ERROR << 16) | fsp->io_status;
                break;
        case FC_CMD_TIME_OUT:
                sc_cmd->result = (DID_BUS_BUSY << 16) | fsp->io_status;
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 747d73c5c8affa7efa94bf0ad94c705ea9063394..7bfbff7e0efb256656d1ef6ce8a5e8ac68fa9c69 100644
@@ -478,7 +478,7 @@ static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
        if (PTR_ERR(fp) == -FC_EX_CLOSED)
                return fc_rport_error(rport, fp);
 
-       if (rdata->retries < rdata->local_port->max_retry_count) {
+       if (rdata->retries < rdata->local_port->max_rport_retry_count) {
                FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
                               PTR_ERR(fp), fc_rport_state(rport));
                rdata->retries++;
@@ -1330,7 +1330,7 @@ int fc_rport_init(struct fc_lport *lport)
 }
 EXPORT_SYMBOL(fc_rport_init);
 
-int fc_setup_rport()
+int fc_setup_rport(void)
 {
        rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
        if (!rport_event_queue)
@@ -1339,7 +1339,7 @@ int fc_setup_rport()
 }
 EXPORT_SYMBOL(fc_setup_rport);
 
-void fc_destroy_rport()
+void fc_destroy_rport(void)
 {
        destroy_workqueue(rport_event_queue);
 }
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index e72b4ad47d35bd6ff3855ec038eed77b15c1719d..59908aead531e6257ad63f1767cf736c82585956 100644
@@ -81,7 +81,8 @@ inline void iscsi_conn_queue_work(struct iscsi_conn *conn)
        struct Scsi_Host *shost = conn->session->host;
        struct iscsi_host *ihost = shost_priv(shost);
 
-       queue_work(ihost->workq, &conn->xmitwork);
+       if (ihost->workq)
+               queue_work(ihost->workq, &conn->xmitwork);
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
 
@@ -109,11 +110,9 @@ iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
                 * if the window closed with IO queued, then kick the
                 * xmit thread
                 */
-               if (!list_empty(&session->leadconn->xmitqueue) ||
-                   !list_empty(&session->leadconn->mgmtqueue)) {
-                       if (!(session->tt->caps & CAP_DATA_PATH_OFFLOAD))
-                               iscsi_conn_queue_work(session->leadconn);
-               }
+               if (!list_empty(&session->leadconn->cmdqueue) ||
+                   !list_empty(&session->leadconn->mgmtqueue))
+                       iscsi_conn_queue_work(session->leadconn);
        }
 }
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
@@ -257,9 +256,11 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
        itt_t itt;
        int rc;
 
-       rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
-       if (rc)
-               return rc;
+       if (conn->session->tt->alloc_pdu) {
+               rc = conn->session->tt->alloc_pdu(task, ISCSI_OP_SCSI_CMD);
+               if (rc)
+                       return rc;
+       }
        hdr = (struct iscsi_cmd *) task->hdr;
        itt = hdr->itt;
        memset(hdr, 0, sizeof(*hdr));
@@ -364,7 +365,6 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
                return -EIO;
 
        task->state = ISCSI_TASK_RUNNING;
-       list_move_tail(&task->running, &conn->run_list);
 
        conn->scsicmd_pdus_cnt++;
        ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -380,26 +380,25 @@ static int iscsi_prep_scsi_cmd_pdu(struct iscsi_task *task)
 }
 
 /**
- * iscsi_complete_command - finish a task
+ * iscsi_free_task - free a task
  * @task: iscsi cmd task
  *
  * Must be called with session lock.
  * This function returns the scsi command to scsi-ml or cleans
  * up mgmt tasks then returns the task to the pool.
  */
-static void iscsi_complete_command(struct iscsi_task *task)
+static void iscsi_free_task(struct iscsi_task *task)
 {
        struct iscsi_conn *conn = task->conn;
        struct iscsi_session *session = conn->session;
        struct scsi_cmnd *sc = task->sc;
 
+       ISCSI_DBG_SESSION(session, "freeing task itt 0x%x state %d sc %p\n",
+                         task->itt, task->state, task->sc);
+
        session->tt->cleanup_task(task);
-       list_del_init(&task->running);
-       task->state = ISCSI_TASK_COMPLETED;
+       task->state = ISCSI_TASK_FREE;
        task->sc = NULL;
-
-       if (conn->task == task)
-               conn->task = NULL;
        /*
         * login task is preallocated so do not free
         */
@@ -408,9 +407,6 @@ static void iscsi_complete_command(struct iscsi_task *task)
 
        __kfifo_put(session->cmdpool.queue, (void*)&task, sizeof(void*));
 
-       if (conn->ping_task == task)
-               conn->ping_task = NULL;
-
        if (sc) {
                task->sc = NULL;
                /* SCSI eh reuses commands to verify us */
@@ -433,7 +429,7 @@ EXPORT_SYMBOL_GPL(__iscsi_get_task);
 static void __iscsi_put_task(struct iscsi_task *task)
 {
        if (atomic_dec_and_test(&task->refcount))
-               iscsi_complete_command(task);
+               iscsi_free_task(task);
 }
 
 void iscsi_put_task(struct iscsi_task *task)
@@ -446,26 +442,74 @@ void iscsi_put_task(struct iscsi_task *task)
 }
 EXPORT_SYMBOL_GPL(iscsi_put_task);
 
+/**
+ * iscsi_complete_task - finish a task
+ * @task: iscsi cmd task
+ * @state: state to complete task with
+ *
+ * Must be called with session lock.
+ */
+static void iscsi_complete_task(struct iscsi_task *task, int state)
+{
+       struct iscsi_conn *conn = task->conn;
+
+       ISCSI_DBG_SESSION(conn->session,
+                         "complete task itt 0x%x state %d sc %p\n",
+                         task->itt, task->state, task->sc);
+       if (task->state == ISCSI_TASK_COMPLETED ||
+           task->state == ISCSI_TASK_ABRT_TMF ||
+           task->state == ISCSI_TASK_ABRT_SESS_RECOV)
+               return;
+       WARN_ON_ONCE(task->state == ISCSI_TASK_FREE);
+       task->state = state;
+
+       if (!list_empty(&task->running))
+               list_del_init(&task->running);
+
+       if (conn->task == task)
+               conn->task = NULL;
+
+       if (conn->ping_task == task)
+               conn->ping_task = NULL;
+
+       /* release get from queueing */
+       __iscsi_put_task(task);
+}
+
 /*
- * session lock must be held
+ * session lock must be held and if not called for a task that is
+ * still pending or from the xmit thread, then xmit thread must
+ * be suspended.
  */
-static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
-                        int err)
+static void fail_scsi_task(struct iscsi_task *task, int err)
 {
+       struct iscsi_conn *conn = task->conn;
        struct scsi_cmnd *sc;
+       int state;
 
+       /*
+        * if a command completes and we get a successful tmf response
+        * we will hit this because the scsi eh abort code does not take
+        * a ref to the task.
+        */
        sc = task->sc;
        if (!sc)
                return;
 
-       if (task->state == ISCSI_TASK_PENDING)
+       if (task->state == ISCSI_TASK_PENDING) {
                /*
                 * cmd never made it to the xmit thread, so we should not count
                 * the cmd in the sequencing
                 */
                conn->session->queued_cmdsn--;
+               /* it was never sent so just complete like normal */
+               state = ISCSI_TASK_COMPLETED;
+       } else if (err == DID_TRANSPORT_DISRUPTED)
+               state = ISCSI_TASK_ABRT_SESS_RECOV;
+       else
+               state = ISCSI_TASK_ABRT_TMF;
 
-       sc->result = err;
+       sc->result = err << 16;
        if (!scsi_bidi_cmnd(sc))
                scsi_set_resid(sc, scsi_bufflen(sc));
        else {
@@ -473,10 +517,7 @@ static void fail_command(struct iscsi_conn *conn, struct iscsi_task *task,
                scsi_in(sc)->resid = scsi_in(sc)->length;
        }
 
-       if (conn->task == task)
-               conn->task = NULL;
-       /* release ref from queuecommand */
-       __iscsi_put_task(task);
+       iscsi_complete_task(task, state);
 }
 
 static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
@@ -516,7 +557,6 @@ static int iscsi_prep_mgmt_task(struct iscsi_conn *conn,
                session->state = ISCSI_STATE_LOGGING_OUT;
 
        task->state = ISCSI_TASK_RUNNING;
-       list_move_tail(&task->running, &conn->mgmt_run_list);
        ISCSI_DBG_SESSION(session, "mgmtpdu [op 0x%x hdr->itt 0x%x "
                          "datalen %d]\n", hdr->opcode & ISCSI_OPCODE_MASK,
                          hdr->itt, task->data_count);
@@ -528,6 +568,7 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                      char *data, uint32_t data_size)
 {
        struct iscsi_session *session = conn->session;
+       struct iscsi_host *ihost = shost_priv(session->host);
        struct iscsi_task *task;
        itt_t itt;
 
@@ -544,6 +585,9 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                 */
                task = conn->login_task;
        else {
+               if (session->state != ISCSI_STATE_LOGGED_IN)
+                       return NULL;
+
                BUG_ON(conn->c_stage == ISCSI_CONN_INITIAL_STAGE);
                BUG_ON(conn->c_stage == ISCSI_CONN_STOPPED);
 
@@ -559,6 +603,8 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        atomic_set(&task->refcount, 1);
        task->conn = conn;
        task->sc = NULL;
+       INIT_LIST_HEAD(&task->running);
+       task->state = ISCSI_TASK_PENDING;
 
        if (data_size) {
                memcpy(task->data, data, data_size);
@@ -566,11 +612,14 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        } else
                task->data_count = 0;
 
-       if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
-               iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
-                                "pdu for mgmt task.\n");
-               goto requeue_task;
+       if (conn->session->tt->alloc_pdu) {
+               if (conn->session->tt->alloc_pdu(task, hdr->opcode)) {
+                       iscsi_conn_printk(KERN_ERR, conn, "Could not allocate "
+                                        "pdu for mgmt task.\n");
+                       goto free_task;
+               }
        }
+
        itt = task->hdr->itt;
        task->hdr_len = sizeof(struct iscsi_hdr);
        memcpy(task->hdr, hdr, sizeof(struct iscsi_hdr));
@@ -583,30 +632,22 @@ __iscsi_conn_send_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                                                   task->conn->session->age);
        }
 
-       INIT_LIST_HEAD(&task->running);
-       list_add_tail(&task->running, &conn->mgmtqueue);
-
-       if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+       if (!ihost->workq) {
                if (iscsi_prep_mgmt_task(conn, task))
                        goto free_task;
 
                if (session->tt->xmit_task(task))
                        goto free_task;
-
-       } else
+       } else {
+               list_add_tail(&task->running, &conn->mgmtqueue);
                iscsi_conn_queue_work(conn);
+       }
 
        return task;
 
 free_task:
        __iscsi_put_task(task);
        return NULL;
-
-requeue_task:
-       if (task != conn->login_task)
-               __kfifo_put(session->cmdpool.queue, (void*)&task,
-                           sizeof(void*));
-       return NULL;
 }
 
 int iscsi_conn_send_pdu(struct iscsi_cls_conn *cls_conn, struct iscsi_hdr *hdr,
@@ -701,11 +742,10 @@ invalid_datalen:
                        sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
        }
 out:
-       ISCSI_DBG_SESSION(session, "done [sc %p res %d itt 0x%x]\n",
+       ISCSI_DBG_SESSION(session, "cmd rsp done [sc %p res %d itt 0x%x]\n",
                          sc, sc->result, task->itt);
        conn->scsirsp_pdus_cnt++;
-
-       __iscsi_put_task(task);
+       iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 }
 
 /**
@@ -724,6 +764,7 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
        if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
                return;
 
+       iscsi_update_cmdsn(conn->session, (struct iscsi_nopin *)hdr);
        sc->result = (DID_OK << 16) | rhdr->cmd_status;
        conn->exp_statsn = be32_to_cpu(rhdr->statsn) + 1;
        if (rhdr->flags & (ISCSI_FLAG_DATA_UNDERFLOW |
@@ -738,8 +779,11 @@ iscsi_data_in_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                        sc->result = (DID_BAD_TARGET << 16) | rhdr->cmd_status;
        }
 
+       ISCSI_DBG_SESSION(conn->session, "data in with status done "
+                         "[sc %p res %d itt 0x%x]\n",
+                         sc, sc->result, task->itt);
        conn->scsirsp_pdus_cnt++;
-       __iscsi_put_task(task);
+       iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 }
 
 static void iscsi_tmf_rsp(struct iscsi_conn *conn, struct iscsi_hdr *hdr)
@@ -823,7 +867,7 @@ static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
  *
  * The session lock must be held.
  */
-static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
+struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 {
        struct iscsi_session *session = conn->session;
        int i;
@@ -840,6 +884,7 @@ static struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *conn, itt_t itt)
 
        return session->cmds[i];
 }
+EXPORT_SYMBOL_GPL(iscsi_itt_to_task);
 
 /**
  * __iscsi_complete_pdu - complete pdu
@@ -959,7 +1004,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                }
 
                iscsi_tmf_rsp(conn, hdr);
-               __iscsi_put_task(task);
+               iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
                break;
        case ISCSI_OP_NOOP_IN:
                iscsi_update_cmdsn(session, (struct iscsi_nopin*)hdr);
@@ -977,7 +1022,7 @@ int __iscsi_complete_pdu(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
                        goto recv_pdu;
 
                mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
-               __iscsi_put_task(task);
+               iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
                break;
        default:
                rc = ISCSI_ERR_BAD_OPCODE;
@@ -989,7 +1034,7 @@ out:
 recv_pdu:
        if (iscsi_recv_pdu(conn->cls_conn, hdr, data, datalen))
                rc = ISCSI_ERR_CONN_FAILED;
-       __iscsi_put_task(task);
+       iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
        return rc;
 }
 EXPORT_SYMBOL_GPL(__iscsi_complete_pdu);
@@ -1166,7 +1211,12 @@ void iscsi_requeue_task(struct iscsi_task *task)
 {
        struct iscsi_conn *conn = task->conn;
 
-       list_move_tail(&task->running, &conn->requeue);
+       /*
+        * this may be on the requeue list already if the xmit_task callout
+        * is handling the r2ts while we are adding new ones
+        */
+       if (list_empty(&task->running))
+               list_add_tail(&task->running, &conn->requeue);
        iscsi_conn_queue_work(conn);
 }
 EXPORT_SYMBOL_GPL(iscsi_requeue_task);
@@ -1206,6 +1256,7 @@ check_mgmt:
        while (!list_empty(&conn->mgmtqueue)) {
                conn->task = list_entry(conn->mgmtqueue.next,
                                         struct iscsi_task, running);
+               list_del_init(&conn->task->running);
                if (iscsi_prep_mgmt_task(conn, conn->task)) {
                        __iscsi_put_task(conn->task);
                        conn->task = NULL;
@@ -1217,23 +1268,26 @@ check_mgmt:
        }
 
        /* process pending command queue */
-       while (!list_empty(&conn->xmitqueue)) {
+       while (!list_empty(&conn->cmdqueue)) {
                if (conn->tmf_state == TMF_QUEUED)
                        break;
 
-               conn->task = list_entry(conn->xmitqueue.next,
+               conn->task = list_entry(conn->cmdqueue.next,
                                         struct iscsi_task, running);
+               list_del_init(&conn->task->running);
                if (conn->session->state == ISCSI_STATE_LOGGING_OUT) {
-                       fail_command(conn, conn->task, DID_IMM_RETRY << 16);
+                       fail_scsi_task(conn->task, DID_IMM_RETRY);
                        continue;
                }
                rc = iscsi_prep_scsi_cmd_pdu(conn->task);
                if (rc) {
                        if (rc == -ENOMEM) {
+                               list_add_tail(&conn->task->running,
+                                             &conn->cmdqueue);
                                conn->task = NULL;
                                goto again;
                        } else
-                               fail_command(conn, conn->task, DID_ABORT << 16);
+                               fail_scsi_task(conn->task, DID_ABORT);
                        continue;
                }
                rc = iscsi_xmit_task(conn);
@@ -1260,8 +1314,8 @@ check_mgmt:
 
                conn->task = list_entry(conn->requeue.next,
                                         struct iscsi_task, running);
+               list_del_init(&conn->task->running);
                conn->task->state = ISCSI_TASK_RUNNING;
-               list_move_tail(conn->requeue.next, &conn->run_list);
                rc = iscsi_xmit_task(conn);
                if (rc)
                        goto again;
@@ -1328,6 +1382,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 {
        struct iscsi_cls_session *cls_session;
        struct Scsi_Host *host;
+       struct iscsi_host *ihost;
        int reason = 0;
        struct iscsi_session *session;
        struct iscsi_conn *conn;
@@ -1338,6 +1393,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
        sc->SCp.ptr = NULL;
 
        host = sc->device->host;
+       ihost = shost_priv(host);
        spin_unlock(host->host_lock);
 
        cls_session = starget_to_session(scsi_target(sc->device));
@@ -1350,13 +1406,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                goto fault;
        }
 
-       /*
-        * ISCSI_STATE_FAILED is a temp. state. The recovery
-        * code will decide what is best to do with command queued
-        * during this time
-        */
-       if (session->state != ISCSI_STATE_LOGGED_IN &&
-           session->state != ISCSI_STATE_FAILED) {
+       if (session->state != ISCSI_STATE_LOGGED_IN) {
                /*
                 * to handle the race between when we set the recovery state
                 * and block the session we requeue here (commands could
@@ -1364,12 +1414,15 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                 * up because the block code is not locked)
                 */
                switch (session->state) {
+               case ISCSI_STATE_FAILED:
                case ISCSI_STATE_IN_RECOVERY:
                        reason = FAILURE_SESSION_IN_RECOVERY;
-                       goto reject;
+                       sc->result = DID_IMM_RETRY << 16;
+                       break;
                case ISCSI_STATE_LOGGING_OUT:
                        reason = FAILURE_SESSION_LOGGING_OUT;
-                       goto reject;
+                       sc->result = DID_IMM_RETRY << 16;
+                       break;
                case ISCSI_STATE_RECOVERY_FAILED:
                        reason = FAILURE_SESSION_RECOVERY_TIMEOUT;
                        sc->result = DID_TRANSPORT_FAILFAST << 16;
@@ -1402,9 +1455,8 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                reason = FAILURE_OOM;
                goto reject;
        }
-       list_add_tail(&task->running, &conn->xmitqueue);
 
-       if (session->tt->caps & CAP_DATA_PATH_OFFLOAD) {
+       if (!ihost->workq) {
                reason = iscsi_prep_scsi_cmd_pdu(task);
                if (reason) {
                        if (reason == -ENOMEM) {
@@ -1419,8 +1471,10 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
                        reason = FAILURE_SESSION_NOT_READY;
                        goto prepd_reject;
                }
-       } else
+       } else {
+               list_add_tail(&task->running, &conn->cmdqueue);
                iscsi_conn_queue_work(conn);
+       }
 
        session->queued_cmdsn++;
        spin_unlock(&session->lock);
@@ -1429,7 +1483,7 @@ int iscsi_queuecommand(struct scsi_cmnd *sc, void (*done)(struct scsi_cmnd *))
 
 prepd_reject:
        sc->scsi_done = NULL;
-       iscsi_complete_command(task);
+       iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 reject:
        spin_unlock(&session->lock);
        ISCSI_DBG_SESSION(session, "cmd 0x%x rejected (%d)\n",
@@ -1439,7 +1493,7 @@ reject:
 
 prepd_fault:
        sc->scsi_done = NULL;
-       iscsi_complete_command(task);
+       iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
 fault:
        spin_unlock(&session->lock);
        ISCSI_DBG_SESSION(session, "iscsi: cmd 0x%x is not queued (%d)\n",
@@ -1608,44 +1662,24 @@ static int iscsi_exec_task_mgmt_fn(struct iscsi_conn *conn,
  * Fail commands. session lock held and recv side suspended and xmit
  * thread flushed
  */
-static void fail_all_commands(struct iscsi_conn *conn, unsigned lun,
-                             int error)
+static void fail_scsi_tasks(struct iscsi_conn *conn, unsigned lun,
+                           int error)
 {
-       struct iscsi_task *task, *tmp;
-
-       if (conn->task) {
-               if (lun == -1 ||
-                   (conn->task->sc && conn->task->sc->device->lun == lun))
-                       conn->task = NULL;
-       }
+       struct iscsi_task *task;
+       int i;
 
-       /* flush pending */
-       list_for_each_entry_safe(task, tmp, &conn->xmitqueue, running) {
-               if (lun == task->sc->device->lun || lun == -1) {
-                       ISCSI_DBG_SESSION(conn->session,
-                                         "failing pending sc %p itt 0x%x\n",
-                                         task->sc, task->itt);
-                       fail_command(conn, task, error << 16);
-               }
-       }
+       for (i = 0; i < conn->session->cmds_max; i++) {
+               task = conn->session->cmds[i];
+               if (!task->sc || task->state == ISCSI_TASK_FREE)
+                       continue;
 
-       list_for_each_entry_safe(task, tmp, &conn->requeue, running) {
-               if (lun == task->sc->device->lun || lun == -1) {
-                       ISCSI_DBG_SESSION(conn->session,
-                                         "failing requeued sc %p itt 0x%x\n",
-                                         task->sc, task->itt);
-                       fail_command(conn, task, error << 16);
-               }
-       }
+               if (lun != -1 && lun != task->sc->device->lun)
+                       continue;
 
-       /* fail all other running */
-       list_for_each_entry_safe(task, tmp, &conn->run_list, running) {
-               if (lun == task->sc->device->lun || lun == -1) {
-                       ISCSI_DBG_SESSION(conn->session,
-                                        "failing in progress sc %p itt 0x%x\n",
-                                        task->sc, task->itt);
-                       fail_command(conn, task, error << 16);
-               }
+               ISCSI_DBG_SESSION(conn->session,
+                                 "failing sc %p itt 0x%x state %d\n",
+                                 task->sc, task->itt, task->state);
+               fail_scsi_task(task, error);
        }
 }
 
@@ -1655,7 +1689,7 @@ void iscsi_suspend_tx(struct iscsi_conn *conn)
        struct iscsi_host *ihost = shost_priv(shost);
 
        set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
+       if (ihost->workq)
                flush_workqueue(ihost->workq);
 }
 EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
@@ -1663,8 +1697,23 @@ EXPORT_SYMBOL_GPL(iscsi_suspend_tx);
 static void iscsi_start_tx(struct iscsi_conn *conn)
 {
        clear_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
-       if (!(conn->session->tt->caps & CAP_DATA_PATH_OFFLOAD))
-               iscsi_conn_queue_work(conn);
+       iscsi_conn_queue_work(conn);
+}
+
+/*
+ * Make sure a ping is actually in flight and has timed out, and that
+ * we are not just busy processing a pdu that is making progress but
+ * got started before the ping and is taking a while to complete, in
+ * which case the ping is simply stuck in the queue behind it.
+ */
+static int iscsi_has_ping_timed_out(struct iscsi_conn *conn)
+{
+       if (conn->ping_task &&
+           time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
+                          (conn->ping_timeout * HZ), jiffies))
+               return 1;
+       else
+               return 0;
 }
 
 static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
@@ -1702,16 +1751,20 @@ static enum blk_eh_timer_return iscsi_eh_cmd_timed_out(struct scsi_cmnd *scmd)
         * if the ping timedout then we are in the middle of cleaning up
         * and can let the iscsi eh handle it
         */
-       if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ) +
-                           (conn->ping_timeout * HZ), jiffies))
+       if (iscsi_has_ping_timed_out(conn)) {
                rc = BLK_EH_RESET_TIMER;
+               goto done;
+       }
        /*
         * if we are about to check the transport then give the command
         * more time
         */
        if (time_before_eq(conn->last_recv + (conn->recv_timeout * HZ),
-                          jiffies))
+                          jiffies)) {
                rc = BLK_EH_RESET_TIMER;
+               goto done;
+       }
+
        /* if in the middle of checking the transport then give us more time */
        if (conn->ping_task)
                rc = BLK_EH_RESET_TIMER;
@@ -1738,13 +1791,13 @@ static void iscsi_check_transport_timeouts(unsigned long data)
 
        recv_timeout *= HZ;
        last_recv = conn->last_recv;
-       if (conn->ping_task &&
-           time_before_eq(conn->last_ping + (conn->ping_timeout * HZ),
-                          jiffies)) {
+
+       if (iscsi_has_ping_timed_out(conn)) {
                iscsi_conn_printk(KERN_ERR, conn, "ping timeout of %d secs "
-                                 "expired, last rx %lu, last ping %lu, "
-                                 "now %lu\n", conn->ping_timeout, last_recv,
-                                 conn->last_ping, jiffies);
+                                 "expired, recv timeout %d, last rx %lu, "
+                                 "last ping %lu, now %lu\n",
+                                 conn->ping_timeout, conn->recv_timeout,
+                                 last_recv, conn->last_ping, jiffies);
                spin_unlock(&session->lock);
                iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
                return;
@@ -1788,6 +1841,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
        cls_session = starget_to_session(scsi_target(sc->device));
        session = cls_session->dd_data;
 
+       ISCSI_DBG_SESSION(session, "aborting sc %p\n", sc);
+
        mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->lock);
        /*
@@ -1810,6 +1865,8 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
            sc->SCp.phase != session->age) {
                spin_unlock_bh(&session->lock);
                mutex_unlock(&session->eh_mutex);
+               ISCSI_DBG_SESSION(session, "failing abort due to dropped "
+                                 "session.\n");
                return FAILED;
        }
 
@@ -1829,7 +1886,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
        }
 
        if (task->state == ISCSI_TASK_PENDING) {
-               fail_command(conn, task, DID_ABORT << 16);
+               fail_scsi_task(task, DID_ABORT);
                goto success;
        }
 
@@ -1860,7 +1917,7 @@ int iscsi_eh_abort(struct scsi_cmnd *sc)
                 * then sent more data for the cmd.
                 */
                spin_lock(&session->lock);
-               fail_command(conn, task, DID_ABORT << 16);
+               fail_scsi_task(task, DID_ABORT);
                conn->tmf_state = TMF_INITIAL;
                spin_unlock(&session->lock);
                iscsi_start_tx(conn);
@@ -1967,7 +2024,7 @@ int iscsi_eh_device_reset(struct scsi_cmnd *sc)
        iscsi_suspend_tx(conn);
 
        spin_lock_bh(&session->lock);
-       fail_all_commands(conn, sc->device->lun, DID_ERROR);
+       fail_scsi_tasks(conn, sc->device->lun, DID_ERROR);
        conn->tmf_state = TMF_INITIAL;
        spin_unlock_bh(&session->lock);
 
@@ -2274,6 +2331,7 @@ iscsi_session_setup(struct iscsi_transport *iscsit, struct Scsi_Host *shost,
                if (cmd_task_size)
                        task->dd_data = &task[1];
                task->itt = cmd_i;
+               task->state = ISCSI_TASK_FREE;
                INIT_LIST_HEAD(&task->running);
        }
 
@@ -2360,10 +2418,8 @@ iscsi_conn_setup(struct iscsi_cls_session *cls_session, int dd_size,
        conn->transport_timer.data = (unsigned long)conn;
        conn->transport_timer.function = iscsi_check_transport_timeouts;
 
-       INIT_LIST_HEAD(&conn->run_list);
-       INIT_LIST_HEAD(&conn->mgmt_run_list);
        INIT_LIST_HEAD(&conn->mgmtqueue);
-       INIT_LIST_HEAD(&conn->xmitqueue);
+       INIT_LIST_HEAD(&conn->cmdqueue);
        INIT_LIST_HEAD(&conn->requeue);
        INIT_WORK(&conn->xmitwork, iscsi_xmitworker);
 
@@ -2531,27 +2587,28 @@ int iscsi_conn_start(struct iscsi_cls_conn *cls_conn)
 EXPORT_SYMBOL_GPL(iscsi_conn_start);
 
 static void
-flush_control_queues(struct iscsi_session *session, struct iscsi_conn *conn)
+fail_mgmt_tasks(struct iscsi_session *session, struct iscsi_conn *conn)
 {
-       struct iscsi_task *task, *tmp;
+       struct iscsi_task *task;
+       int i, state;
 
-       /* handle pending */
-       list_for_each_entry_safe(task, tmp, &conn->mgmtqueue, running) {
-               ISCSI_DBG_SESSION(session, "flushing pending mgmt task "
-                                 "itt 0x%x\n", task->itt);
-               /* release ref from prep task */
-               __iscsi_put_task(task);
-       }
+       for (i = 0; i < conn->session->cmds_max; i++) {
+               task = conn->session->cmds[i];
+               if (task->sc)
+                       continue;
 
-       /* handle running */
-       list_for_each_entry_safe(task, tmp, &conn->mgmt_run_list, running) {
-               ISCSI_DBG_SESSION(session, "flushing running mgmt task "
-                                 "itt 0x%x\n", task->itt);
-               /* release ref from prep task */
-               __iscsi_put_task(task);
-       }
+               if (task->state == ISCSI_TASK_FREE)
+                       continue;
+
+               ISCSI_DBG_SESSION(conn->session,
+                                 "failing mgmt itt 0x%x state %d\n",
+                                 task->itt, task->state);
+               state = ISCSI_TASK_ABRT_SESS_RECOV;
+               if (task->state == ISCSI_TASK_PENDING)
+                       state = ISCSI_TASK_COMPLETED;
+               iscsi_complete_task(task, state);
 
-       conn->task = NULL;
+       }
 }
 
 static void iscsi_start_session_recovery(struct iscsi_session *session,
@@ -2559,8 +2616,6 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
 {
        int old_stop_stage;
 
-       del_timer_sync(&conn->transport_timer);
-
        mutex_lock(&session->eh_mutex);
        spin_lock_bh(&session->lock);
        if (conn->stop_stage == STOP_CONN_TERM) {
@@ -2578,13 +2633,17 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
                session->state = ISCSI_STATE_TERMINATE;
        else if (conn->stop_stage != STOP_CONN_RECOVER)
                session->state = ISCSI_STATE_IN_RECOVERY;
+       spin_unlock_bh(&session->lock);
+
+       del_timer_sync(&conn->transport_timer);
+       iscsi_suspend_tx(conn);
 
+       spin_lock_bh(&session->lock);
        old_stop_stage = conn->stop_stage;
        conn->stop_stage = flag;
        conn->c_stage = ISCSI_CONN_STOPPED;
        spin_unlock_bh(&session->lock);
 
-       iscsi_suspend_tx(conn);
        /*
         * for connection level recovery we should not calculate
         * header digest. conn->hdr_size used for optimization
@@ -2605,11 +2664,8 @@ static void iscsi_start_session_recovery(struct iscsi_session *session,
         * flush queues.
         */
        spin_lock_bh(&session->lock);
-       if (flag == STOP_CONN_RECOVER)
-               fail_all_commands(conn, -1, DID_TRANSPORT_DISRUPTED);
-       else
-               fail_all_commands(conn, -1, DID_ERROR);
-       flush_control_queues(session, conn);
+       fail_scsi_tasks(conn, -1, DID_TRANSPORT_DISRUPTED);
+       fail_mgmt_tasks(session, conn);
        spin_unlock_bh(&session->lock);
        mutex_unlock(&session->eh_mutex);
 }
@@ -2651,6 +2707,23 @@ int iscsi_conn_bind(struct iscsi_cls_session *cls_session,
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_bind);
 
+static int iscsi_switch_str_param(char **param, char *new_val_buf)
+{
+       char *new_val;
+
+       if (*param) {
+               if (!strcmp(*param, new_val_buf))
+                       return 0;
+       }
+
+       new_val = kstrdup(new_val_buf, GFP_NOIO);
+       if (!new_val)
+               return -ENOMEM;
+
+       kfree(*param);
+       *param = new_val;
+       return 0;
+}
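
Note: iscsi_switch_str_param() folds the repeated kstrdup/kfree pattern in
the cases below into one helper, short-circuiting identical values and,
unlike the old TARGET_NAME/IFACE_NAME cases, allowing a value to be replaced
on re-login. A userspace analogue for illustration (strdup stands in for
kstrdup(..., GFP_NOIO)):

/* Dup-and-swap a string parameter, keeping the old buffer when the
 * value is unchanged or allocation fails. */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int switch_str_param(char **param, const char *new_val_buf)
{
        char *new_val;

        if (*param && !strcmp(*param, new_val_buf))
                return 0;                       /* unchanged */

        new_val = strdup(new_val_buf);
        if (!new_val)
                return -ENOMEM;                 /* old value preserved */

        free(*param);
        *param = new_val;
        return 0;
}

int main(void)
{
        char *target = NULL;

        switch_str_param(&target, "iqn.2009-06.example:tgt0");
        switch_str_param(&target, "iqn.2009-06.example:tgt0"); /* no-op */
        printf("%s\n", target);
        free(target);
        return 0;
}
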
 
 int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
                    enum iscsi_param param, char *buf, int buflen)
@@ -2723,38 +2796,15 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
                sscanf(buf, "%u", &conn->exp_statsn);
                break;
        case ISCSI_PARAM_USERNAME:
-               kfree(session->username);
-               session->username = kstrdup(buf, GFP_KERNEL);
-               if (!session->username)
-                       return -ENOMEM;
-               break;
+               return iscsi_switch_str_param(&session->username, buf);
        case ISCSI_PARAM_USERNAME_IN:
-               kfree(session->username_in);
-               session->username_in = kstrdup(buf, GFP_KERNEL);
-               if (!session->username_in)
-                       return -ENOMEM;
-               break;
+               return iscsi_switch_str_param(&session->username_in, buf);
        case ISCSI_PARAM_PASSWORD:
-               kfree(session->password);
-               session->password = kstrdup(buf, GFP_KERNEL);
-               if (!session->password)
-                       return -ENOMEM;
-               break;
+               return iscsi_switch_str_param(&session->password, buf);
        case ISCSI_PARAM_PASSWORD_IN:
-               kfree(session->password_in);
-               session->password_in = kstrdup(buf, GFP_KERNEL);
-               if (!session->password_in)
-                       return -ENOMEM;
-               break;
+               return iscsi_switch_str_param(&session->password_in, buf);
        case ISCSI_PARAM_TARGET_NAME:
-               /* this should not change between logins */
-               if (session->targetname)
-                       break;
-
-               session->targetname = kstrdup(buf, GFP_KERNEL);
-               if (!session->targetname)
-                       return -ENOMEM;
-               break;
+               return iscsi_switch_str_param(&session->targetname, buf);
        case ISCSI_PARAM_TPGT:
                sscanf(buf, "%d", &session->tpgt);
                break;
@@ -2762,25 +2812,11 @@ int iscsi_set_param(struct iscsi_cls_conn *cls_conn,
                sscanf(buf, "%d", &conn->persistent_port);
                break;
        case ISCSI_PARAM_PERSISTENT_ADDRESS:
-               /*
-                * this is the address returned in discovery so it should
-                * not change between logins.
-                */
-               if (conn->persistent_address)
-                       break;
-
-               conn->persistent_address = kstrdup(buf, GFP_KERNEL);
-               if (!conn->persistent_address)
-                       return -ENOMEM;
-               break;
+               return iscsi_switch_str_param(&conn->persistent_address, buf);
        case ISCSI_PARAM_IFACE_NAME:
-               if (!session->ifacename)
-                       session->ifacename = kstrdup(buf, GFP_KERNEL);
-               break;
+               return iscsi_switch_str_param(&session->ifacename, buf);
        case ISCSI_PARAM_INITIATOR_NAME:
-               if (!session->initiatorname)
-                       session->initiatorname = kstrdup(buf, GFP_KERNEL);
-               break;
+               return iscsi_switch_str_param(&session->initiatorname, buf);
        default:
                return -ENOSYS;
        }
@@ -2851,10 +2887,7 @@ int iscsi_session_get_param(struct iscsi_cls_session *cls_session,
                len = sprintf(buf, "%s\n", session->ifacename);
                break;
        case ISCSI_PARAM_INITIATOR_NAME:
-               if (!session->initiatorname)
-                       len = sprintf(buf, "%s\n", "unknown");
-               else
-                       len = sprintf(buf, "%s\n", session->initiatorname);
+               len = sprintf(buf, "%s\n", session->initiatorname);
                break;
        default:
                return -ENOSYS;
@@ -2920,29 +2953,16 @@ int iscsi_host_get_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 
        switch (param) {
        case ISCSI_HOST_PARAM_NETDEV_NAME:
-               if (!ihost->netdev)
-                       len = sprintf(buf, "%s\n", "default");
-               else
-                       len = sprintf(buf, "%s\n", ihost->netdev);
+               len = sprintf(buf, "%s\n", ihost->netdev);
                break;
        case ISCSI_HOST_PARAM_HWADDRESS:
-               if (!ihost->hwaddress)
-                       len = sprintf(buf, "%s\n", "default");
-               else
-                       len = sprintf(buf, "%s\n", ihost->hwaddress);
+               len = sprintf(buf, "%s\n", ihost->hwaddress);
                break;
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
-               if (!ihost->initiatorname)
-                       len = sprintf(buf, "%s\n", "unknown");
-               else
-                       len = sprintf(buf, "%s\n", ihost->initiatorname);
+               len = sprintf(buf, "%s\n", ihost->initiatorname);
                break;
        case ISCSI_HOST_PARAM_IPADDRESS:
-               if (!strlen(ihost->local_address))
-                       len = sprintf(buf, "%s\n", "unknown");
-               else
-                       len = sprintf(buf, "%s\n",
-                                     ihost->local_address);
+               len = sprintf(buf, "%s\n", ihost->local_address);
                break;
        default:
                return -ENOSYS;
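
Dropping the "unknown"/"default" fallbacks above is only safe if these strings are guaranteed non-NULL by the time sysfs can read them, so the host allocation path presumably seeds them now. A hypothetical sketch of that seeding (function name and placement assumed):

    /* Hypothetical: seed the old fallback strings at host allocation
     * time so the sysfs readers never dereference NULL. */
    static int iscsi_host_seed_defaults(struct iscsi_host *ihost)
    {
            ihost->netdev = kstrdup("default", GFP_KERNEL);
            ihost->hwaddress = kstrdup("default", GFP_KERNEL);
            ihost->initiatorname = kstrdup("unknown", GFP_KERNEL);
            if (!ihost->netdev || !ihost->hwaddress || !ihost->initiatorname)
                    return -ENOMEM;
            return 0;
    }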
@@ -2959,17 +2979,11 @@ int iscsi_host_set_param(struct Scsi_Host *shost, enum iscsi_host_param param,
 
        switch (param) {
        case ISCSI_HOST_PARAM_NETDEV_NAME:
-               if (!ihost->netdev)
-                       ihost->netdev = kstrdup(buf, GFP_KERNEL);
-               break;
+               return iscsi_switch_str_param(&ihost->netdev, buf);
        case ISCSI_HOST_PARAM_HWADDRESS:
-               if (!ihost->hwaddress)
-                       ihost->hwaddress = kstrdup(buf, GFP_KERNEL);
-               break;
+               return iscsi_switch_str_param(&ihost->hwaddress, buf);
        case ISCSI_HOST_PARAM_INITIATOR_NAME:
-               if (!ihost->initiatorname)
-                       ihost->initiatorname = kstrdup(buf, GFP_KERNEL);
-               break;
+               return iscsi_switch_str_param(&ihost->initiatorname, buf);
        default:
                return -ENOSYS;
        }
index b579ca9f4836a9c976b2c76856e1d0709650e1e4..2bc07090321da30562b3b21ef6c2b6b8dc51e887 100644 (file)
@@ -440,8 +440,8 @@ void iscsi_tcp_cleanup_task(struct iscsi_task *task)
        struct iscsi_tcp_task *tcp_task = task->dd_data;
        struct iscsi_r2t_info *r2t;
 
-       /* nothing to do for mgmt or pending tasks */
-       if (!task->sc || task->state == ISCSI_TASK_PENDING)
+       /* nothing to do for mgmt tasks */
+       if (!task->sc)
                return;
 
        /* flush task's r2t queues */
@@ -473,7 +473,13 @@ static int iscsi_tcp_data_in(struct iscsi_conn *conn, struct iscsi_task *task)
        int datasn = be32_to_cpu(rhdr->datasn);
        unsigned total_in_length = scsi_in(task->sc)->length;
 
-       iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+       /*
+        * libiscsi will update this in the completion handling if the
+        * PDU also carries status.
+        */
+       if (!(rhdr->flags & ISCSI_FLAG_DATA_STATUS))
+               iscsi_update_cmdsn(conn->session, (struct iscsi_nopin*)rhdr);
+
        if (tcp_conn->in.datalen == 0)
                return 0;
 
@@ -857,6 +863,12 @@ int iscsi_tcp_recv_skb(struct iscsi_conn *conn, struct sk_buff *skb,
        int rc = 0;
 
        ISCSI_DBG_TCP(conn, "in %d bytes\n", skb->len - offset);
+       /*
+        * Update last_recv for each skb instead of each pdu, because over
+        * slow networks a data_in's payload can take a while to read in.
+        * We also want to account for r2ts.
+        */
+       conn->last_recv = jiffies;
 
        if (unlikely(conn->suspend_rx)) {
                ISCSI_DBG_TCP(conn, "Rx suspended!\n");
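
conn->last_recv then feeds the connection's ping/timeout logic; a hypothetical consumer (field and function names assumed), which shows why per-skb granularity matters on slow links:

    /* Hypothetical nop-out timer check: a single PDU may take longer
     * than the timeout to transfer, but as long as skbs keep arriving
     * the connection is demonstrably alive. */
    if (time_before(jiffies, conn->last_recv + conn->recv_timeout * HZ))
            return;                 /* data seen recently, no ping needed */
    iscsi_send_nopout(conn, NULL);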
index 1105f9a111ba416d956f89802b71f85de2b7f447..540569849099fc88cd774b2ce2549f1f586d0196 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 
 struct lpfc_sli2_slim;
 
+#define LPFC_PCI_DEV_LP                0x1
+#define LPFC_PCI_DEV_OC                0x2
+
+#define LPFC_SLI_REV2          2
+#define LPFC_SLI_REV3          3
+#define LPFC_SLI_REV4          4
+
 #define LPFC_MAX_TARGET                4096    /* max number of targets supported */
 #define LPFC_MAX_DISC_THREADS  64      /* max outstanding discovery els
                                           requests */
@@ -98,9 +105,11 @@ struct lpfc_dma_pool {
 };
 
 struct hbq_dmabuf {
+       struct lpfc_dmabuf hbuf;
        struct lpfc_dmabuf dbuf;
        uint32_t size;
        uint32_t tag;
+       struct lpfc_rcqe rcqe;
 };
 
 /* Priority bit.  Set value to exceed low water mark in lpfc_mem. */
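
struct hbq_dmabuf now carries a header/data buffer pair plus the receive-queue completion entry, because SLI4 hardware posts unsolicited frames as two buffers. A hedged sketch of how lpfc_sli4_rb_alloc(), declared later in this patch, presumably pairs them from the two new PCI pools (lpfc_hrb_pool/lpfc_drb_pool, introduced further down):

    struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
    {
            struct hbq_dmabuf *dma_buf;

            dma_buf = kzalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
            if (!dma_buf)
                    return NULL;

            /* Header buffer from the header receive buffer pool. */
            dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool,
                                                GFP_KERNEL, &dma_buf->hbuf.phys);
            if (!dma_buf->hbuf.virt)
                    goto free_buf;

            /* Data buffer from the data receive buffer pool. */
            dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool,
                                                GFP_KERNEL, &dma_buf->dbuf.phys);
            if (!dma_buf->dbuf.virt)
                    goto free_hbuf;

            return dma_buf;

    free_hbuf:
            pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
                          dma_buf->hbuf.phys);
    free_buf:
            kfree(dma_buf);
            return NULL;
    }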
@@ -134,7 +143,10 @@ typedef struct lpfc_vpd {
        } rev;
        struct {
 #ifdef __BIG_ENDIAN_BITFIELD
-               uint32_t rsvd2  :24;  /* Reserved                             */
+               uint32_t rsvd3  :19;  /* Reserved                             */
+               uint32_t cdss   : 1;  /* Configure Data Security SLI          */
+               uint32_t rsvd2  : 3;  /* Reserved                             */
+               uint32_t cbg    : 1;  /* Configure BlockGuard                 */
                uint32_t cmv    : 1;  /* Configure Max VPIs                   */
                uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
                uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
@@ -152,7 +164,10 @@ typedef struct lpfc_vpd {
                uint32_t csah   : 1;  /* Configure Synchronous Abort Handling */
                uint32_t ccrp   : 1;  /* Config Command Ring Polling          */
                uint32_t cmv    : 1;  /* Configure Max VPIs                   */
-               uint32_t rsvd2  :24;  /* Reserved                             */
+               uint32_t cbg    : 1;  /* Configure BlockGuard                 */
+               uint32_t rsvd2  : 3;  /* Reserved                             */
+               uint32_t cdss   : 1;  /* Configure Data Security SLI          */
+               uint32_t rsvd3  :19;  /* Reserved                             */
 #endif
        } sli3Feat;
 } lpfc_vpd_t;
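
The new cdss/cbg bits are declared twice, in opposite order, under the two byte-order ifdefs: C allocates bitfields in host order, so mirroring the declarations keeps every field at the same wire position. A compilable illustration of the idiom (hypothetical struct, not taken from lpfc):

    #include <stdint.h>

    /* Same 32-bit wire layout on either byte order; only the
     * declaration order flips. */
    struct feat_bits {
    #ifdef __BIG_ENDIAN_BITFIELD
            uint32_t rsvd3 : 19;
            uint32_t cdss  :  1;
            uint32_t rsvd2 :  3;
            uint32_t cbg   :  1;
            uint32_t other :  8;    /* remaining low-order feature bits */
    #else   /* little endian */
            uint32_t other :  8;
            uint32_t cbg   :  1;
            uint32_t rsvd2 :  3;
            uint32_t cdss  :  1;
            uint32_t rsvd3 : 19;
    #endif
    };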
@@ -264,8 +279,8 @@ enum hba_state {
 };
 
 struct lpfc_vport {
-       struct list_head listentry;
        struct lpfc_hba *phba;
+       struct list_head listentry;
        uint8_t port_type;
 #define LPFC_PHYSICAL_PORT 1
 #define LPFC_NPIV_PORT  2
@@ -273,6 +288,9 @@ struct lpfc_vport {
        enum discovery_state port_state;
 
        uint16_t vpi;
+       uint16_t vfi;
+       uint8_t vfi_state;
+#define LPFC_VFI_REGISTERED    0x1
 
        uint32_t fc_flag;       /* FC flags */
 /* Several of these flags are HBA centric and should be moved to
@@ -385,6 +403,9 @@ struct lpfc_vport {
 #endif
        uint8_t stat_data_enabled;
        uint8_t stat_data_blocked;
+       struct list_head rcv_buffer_list;
+       uint32_t vport_flag;
+#define STATIC_VPORT   1
 };
 
 struct hbq_s {
@@ -420,8 +441,66 @@ enum intr_type_t {
 };
 
 struct lpfc_hba {
+       /* SCSI interface function jump table entries */
+       int (*lpfc_new_scsi_buf)
+               (struct lpfc_vport *, int);
+       struct lpfc_scsi_buf * (*lpfc_get_scsi_buf)
+               (struct lpfc_hba *);
+       int (*lpfc_scsi_prep_dma_buf)
+               (struct lpfc_hba *, struct lpfc_scsi_buf *);
+       void (*lpfc_scsi_unprep_dma_buf)
+               (struct lpfc_hba *, struct lpfc_scsi_buf *);
+       void (*lpfc_release_scsi_buf)
+               (struct lpfc_hba *, struct lpfc_scsi_buf *);
+       void (*lpfc_rampdown_queue_depth)
+               (struct lpfc_hba *);
+       void (*lpfc_scsi_prep_cmnd)
+               (struct lpfc_vport *, struct lpfc_scsi_buf *,
+                struct lpfc_nodelist *);
+       int (*lpfc_scsi_prep_task_mgmt_cmd)
+               (struct lpfc_vport *, struct lpfc_scsi_buf *,
+                unsigned int, uint8_t);
+
+       /* IOCB interface function jump table entries */
+       int (*__lpfc_sli_issue_iocb)
+               (struct lpfc_hba *, uint32_t,
+                struct lpfc_iocbq *, uint32_t);
+       void (*__lpfc_sli_release_iocbq)(struct lpfc_hba *,
+                        struct lpfc_iocbq *);
+       int (*lpfc_hba_down_post)(struct lpfc_hba *phba);
+
+
+       IOCB_t * (*lpfc_get_iocb_from_iocbq)
+               (struct lpfc_iocbq *);
+       void (*lpfc_scsi_cmd_iocb_cmpl)
+               (struct lpfc_hba *, struct lpfc_iocbq *, struct lpfc_iocbq *);
+
+       /* MBOX interface function jump table entries */
+       int (*lpfc_sli_issue_mbox)
+               (struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
+       /* Slow-path IOCB process function jump table entries */
+       void (*lpfc_sli_handle_slow_ring_event)
+               (struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                uint32_t mask);
+       /* INIT device interface function jump table entries */
+       int (*lpfc_sli_hbq_to_firmware)
+               (struct lpfc_hba *, uint32_t, struct hbq_dmabuf *);
+       int (*lpfc_sli_brdrestart)
+               (struct lpfc_hba *);
+       int (*lpfc_sli_brdready)
+               (struct lpfc_hba *, uint32_t);
+       void (*lpfc_handle_eratt)
+               (struct lpfc_hba *);
+       void (*lpfc_stop_port)
+               (struct lpfc_hba *);
+
+
+       /* SLI4 specific HBA data structure */
+       struct lpfc_sli4_hba sli4_hba;
+
        struct lpfc_sli sli;
-       uint32_t sli_rev;               /* SLI2 or SLI3 */
+       uint8_t pci_dev_grp;    /* lpfc PCI dev group: 0x0, 0x1, 0x2,... */
+       uint32_t sli_rev;               /* SLI2, SLI3, or SLI4 */
        uint32_t sli3_options;          /* Mask of enabled SLI3 options */
 #define LPFC_SLI3_HBQ_ENABLED          0x01
 #define LPFC_SLI3_NPIV_ENABLED         0x02
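
These jump tables let the rest of the driver issue mailbox, IOCB, and SCSI operations without branching on the SLI revision in hot paths: the per-revision entry points are bound once at attach time. A hedged sketch of the binding (the _s3/_s4 function names are assumptions; the *_api_table_setup() routines themselves are declared in the lpfc_crtn.h hunk further down):

    int lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
    {
            switch (dev_grp) {
            case LPFC_PCI_DEV_LP:   /* existing SLI2/SLI3 HBAs */
                    phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
                    phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
                    break;
            case LPFC_PCI_DEV_OC:   /* SLI4, FCoE-capable adapters */
                    phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
                    phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
                    break;
            default:
                    return -ENODEV;
            }
            return 0;
    }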
@@ -429,6 +508,7 @@ struct lpfc_hba {
 #define LPFC_SLI3_CRP_ENABLED          0x08
 #define LPFC_SLI3_INB_ENABLED          0x10
 #define LPFC_SLI3_BG_ENABLED           0x20
+#define LPFC_SLI3_DSS_ENABLED          0x40
        uint32_t iocb_cmd_size;
        uint32_t iocb_rsp_size;
 
@@ -442,8 +522,13 @@ struct lpfc_hba {
 
        uint32_t hba_flag;      /* hba generic flags */
 #define HBA_ERATT_HANDLED      0x1 /* This flag is set when eratt handled */
-
-#define DEFER_ERATT            0x4 /* Deferred error attention in progress */
+#define DEFER_ERATT            0x2 /* Deferred error attention in progress */
+#define HBA_FCOE_SUPPORT       0x4 /* HBA function supports FCOE */
+#define HBA_RECEIVE_BUFFER     0x8 /* Rcv buffer posted to worker thread */
+#define HBA_POST_RECEIVE_BUFFER 0x10 /* Rcv buffers need to be posted */
+#define FCP_XRI_ABORT_EVENT    0x20
+#define ELS_XRI_ABORT_EVENT    0x40
+#define ASYNC_EVENT            0x80
        struct lpfc_dmabuf slim2p;
 
        MAILBOX_t *mbox;
@@ -502,6 +587,9 @@ struct lpfc_hba {
        uint32_t cfg_poll;
        uint32_t cfg_poll_tmo;
        uint32_t cfg_use_msi;
+       uint32_t cfg_fcp_imax;
+       uint32_t cfg_fcp_wq_count;
+       uint32_t cfg_fcp_eq_count;
        uint32_t cfg_sg_seg_cnt;
        uint32_t cfg_prot_sg_seg_cnt;
        uint32_t cfg_sg_dma_buf_size;
@@ -511,6 +599,8 @@ struct lpfc_hba {
        uint32_t cfg_enable_hba_reset;
        uint32_t cfg_enable_hba_heartbeat;
        uint32_t cfg_enable_bg;
+       uint32_t cfg_enable_fip;
+       uint32_t cfg_log_verbose;
 
        lpfc_vpd_t vpd;         /* vital product data */
 
@@ -526,11 +616,12 @@ struct lpfc_hba {
        unsigned long data_flags;
 
        uint32_t hbq_in_use;            /* HBQs in use flag */
-       struct list_head hbqbuf_in_list;  /* in-fly hbq buffer list */
+       struct list_head rb_pend_list;  /* Received buffers to be processed */
        uint32_t hbq_count;             /* Count of configured HBQs */
        struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies  */
 
        unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
+       unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
        unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
        void __iomem *slim_memmap_p;    /* Kernel memory mapped address for
                                           PCI BAR0 */
@@ -593,7 +684,8 @@ struct lpfc_hba {
        /* pci_mem_pools */
        struct pci_pool *lpfc_scsi_dma_buf_pool;
        struct pci_pool *lpfc_mbuf_pool;
-       struct pci_pool *lpfc_hbq_pool;
+       struct pci_pool *lpfc_hrb_pool; /* header receive buffer pool */
+       struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
        struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
        mempool_t *mbox_mem_pool;
@@ -609,6 +701,14 @@ struct lpfc_hba {
        struct lpfc_vport *pport;       /* physical lpfc_vport pointer */
        uint16_t max_vpi;               /* Maximum virtual nports */
 #define LPFC_MAX_VPI 0xFFFF            /* Max number of VPI supported */
+       uint16_t max_vports;            /*
+                                        * For IOV HBAs max_vpi can change
+                                        * after a reset. max_vports is max
+                                        * number of vports present. This can
+                                        * be greater than max_vpi.
+                                        */
+       uint16_t vpi_base;
+       uint16_t vfi_base;
        unsigned long *vpi_bmask;       /* vpi allocation table */
 
        /* Data structure used by fabric iocb scheduler */
@@ -667,6 +767,11 @@ struct lpfc_hba {
 /* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
        atomic_t fast_event_count;
+       struct lpfc_fcf fcf;
+       uint8_t fc_map[3];
+       uint8_t valid_vlan;
+       uint16_t vlan_id;
+       struct list_head fcf_conn_rec_list;
 };
 
 static inline struct Scsi_Host *
index c14f0cbdb125024372d765c746585c4c8844882b..d73e677201f8df229784fce5fc48e039dba7ae56 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -505,12 +507,14 @@ lpfc_issue_lip(struct Scsi_Host *shost)
                return -ENOMEM;
 
        memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
-       pmboxq->mb.mbxCommand = MBX_DOWN_LINK;
-       pmboxq->mb.mbxOwner = OWN_HOST;
+       pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
+       pmboxq->u.mb.mbxOwner = OWN_HOST;
 
        mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO * 2);
 
-       if ((mbxstatus == MBX_SUCCESS) && (pmboxq->mb.mbxStatus == 0)) {
+       if ((mbxstatus == MBX_SUCCESS) &&
+           (pmboxq->u.mb.mbxStatus == 0 ||
+            pmboxq->u.mb.mbxStatus == MBXERR_LINK_DOWN)) {
                memset((void *)pmboxq, 0, sizeof (LPFC_MBOXQ_t));
                lpfc_init_link(phba, pmboxq, phba->cfg_topology,
                               phba->cfg_link_speed);
@@ -789,7 +793,8 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
                  uint32_t *mrpi, uint32_t *arpi,
                  uint32_t *mvpi, uint32_t *avpi)
 {
-       struct lpfc_sli   *psli = &phba->sli;
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_mbx_read_config *rd_config;
        LPFC_MBOXQ_t *pmboxq;
        MAILBOX_t *pmb;
        int rc = 0;
@@ -800,7 +805,7 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
         */
        if (phba->link_state < LPFC_LINK_DOWN ||
            !phba->mbox_mem_pool ||
-           (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+           (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
                return 0;
 
        if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -811,13 +816,13 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
                return 0;
        memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-       pmb = &pmboxq->mb;
+       pmb = &pmboxq->u.mb;
        pmb->mbxCommand = MBX_READ_CONFIG;
        pmb->mbxOwner = OWN_HOST;
        pmboxq->context1 = NULL;
 
        if ((phba->pport->fc_flag & FC_OFFLINE_MODE) ||
-               (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
                rc = MBX_NOT_FINISHED;
        else
                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -828,18 +833,37 @@ lpfc_get_hba_info(struct lpfc_hba *phba,
                return 0;
        }
 
-       if (mrpi)
-               *mrpi = pmb->un.varRdConfig.max_rpi;
-       if (arpi)
-               *arpi = pmb->un.varRdConfig.avail_rpi;
-       if (mxri)
-               *mxri = pmb->un.varRdConfig.max_xri;
-       if (axri)
-               *axri = pmb->un.varRdConfig.avail_xri;
-       if (mvpi)
-               *mvpi = pmb->un.varRdConfig.max_vpi;
-       if (avpi)
-               *avpi = pmb->un.varRdConfig.avail_vpi;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               rd_config = &pmboxq->u.mqe.un.rd_config;
+               if (mrpi)
+                       *mrpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+               if (arpi)
+                       *arpi = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config) -
+                                       phba->sli4_hba.max_cfg_param.rpi_used;
+               if (mxri)
+                       *mxri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+               if (axri)
+                       *axri = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config) -
+                                       phba->sli4_hba.max_cfg_param.xri_used;
+               if (mvpi)
+                       *mvpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+               if (avpi)
+                       *avpi = bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config) -
+                                       phba->sli4_hba.max_cfg_param.vpi_used;
+       } else {
+               if (mrpi)
+                       *mrpi = pmb->un.varRdConfig.max_rpi;
+               if (arpi)
+                       *arpi = pmb->un.varRdConfig.avail_rpi;
+               if (mxri)
+                       *mxri = pmb->un.varRdConfig.max_xri;
+               if (axri)
+                       *axri = pmb->un.varRdConfig.avail_xri;
+               if (mvpi)
+                       *mvpi = pmb->un.varRdConfig.max_vpi;
+               if (avpi)
+                       *avpi = pmb->un.varRdConfig.avail_vpi;
+       }
 
        mempool_free(pmboxq, phba->mbox_mem_pool);
        return 1;
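
The bf_get() calls in the SLI4 branch are the bitfield accessors from the newly included lpfc_hw4.h; a simplified model of the assumed convention, where each field name supplies _SHIFT/_MASK constants and a _WORD member:

    #define bf_get(name, ptr) \
            (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
    #define bf_set(name, ptr, value) \
            ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
             ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))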
@@ -2021,22 +2045,9 @@ static DEVICE_ATTR(lpfc_devloss_tmo, S_IRUGO | S_IWUSR,
 # lpfc_log_verbose: Only turn this flag on if you are willing to risk being
 # deluged with LOTS of information.
 # You can set a bit mask to record specific types of verbose messages:
-#
-# LOG_ELS                       0x1        ELS events
-# LOG_DISCOVERY                 0x2        Link discovery events
-# LOG_MBOX                      0x4        Mailbox events
-# LOG_INIT                      0x8        Initialization events
-# LOG_LINK_EVENT                0x10       Link events
-# LOG_FCP                       0x40       FCP traffic history
-# LOG_NODE                      0x80       Node table events
-# LOG_BG                        0x200      BlockBuard events
-# LOG_MISC                      0x400      Miscellaneous events
-# LOG_SLI                       0x800      SLI events
-# LOG_FCP_ERROR                 0x1000     Only log FCP errors
-# LOG_LIBDFC                    0x2000     LIBDFC events
-# LOG_ALL_MSG                   0xffff     LOG all messages
+# See lpfc_logmsg.h for definitions.
 */
-LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffff,
+LPFC_VPORT_ATTR_HEX_RW(log_verbose, 0x0, 0x0, 0xffffffff,
                       "Verbose logging bit-mask");
 
 /*
@@ -2266,6 +2277,36 @@ lpfc_param_init(topology, 0, 0, 6)
 static DEVICE_ATTR(lpfc_topology, S_IRUGO | S_IWUSR,
                lpfc_topology_show, lpfc_topology_store);
 
+/**
+ * lpfc_static_vport_show - Read callback function for
+ *   lpfc_static_vport sysfs file.
+ * @dev: Pointer to class device object.
+ * @attr: device attribute structure.
+ * @buf: Data buffer.
+ *
+ * This function is the read callback function for the
+ * lpfc_static_vport sysfs file. The lpfc_static_vport
+ * sysfs file reports the manageability of the vport.
+ **/
+static ssize_t
+lpfc_static_vport_show(struct device *dev, struct device_attribute *attr,
+                        char *buf)
+{
+       struct Scsi_Host  *shost = class_to_shost(dev);
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       if (vport->vport_flag & STATIC_VPORT)
+               sprintf(buf, "1\n");
+       else
+               sprintf(buf, "0\n");
+
+       return strlen(buf);
+}
+
+/*
+ * Sysfs attribute to report whether the vport is a static vport.
+ */
+static DEVICE_ATTR(lpfc_static_vport, S_IRUGO,
+                  lpfc_static_vport_show, NULL);
 
 /**
  * lpfc_stat_data_ctrl_store - write call back for lpfc_stat_data_ctrl sysfs file
@@ -2341,7 +2382,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
                if (vports == NULL)
                        return -ENOMEM;
 
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        v_shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(v_shost->host_lock);
                        /* Block and reset data collection */
@@ -2356,7 +2397,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
                phba->bucket_base = base;
                phba->bucket_step = step;
 
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        v_shost = lpfc_shost_from_vport(vports[i]);
 
                        /* Unblock data collection */
@@ -2373,7 +2414,7 @@ lpfc_stat_data_ctrl_store(struct device *dev, struct device_attribute *attr,
                if (vports == NULL)
                        return -ENOMEM;
 
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        v_shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->stat_data_blocked = 1;
@@ -2844,14 +2885,38 @@ LPFC_ATTR_RW(poll_tmo, 10, 1, 255,
 /*
 # lpfc_use_msi: Use MSI (Message Signaled Interrupts) in systems that
 #              support this feature
-#       0  = MSI disabled
+#       0  = MSI disabled (default)
 #       1  = MSI enabled
-#       2  = MSI-X enabled (default)
-# Value range is [0,2]. Default value is 2.
+#       2  = MSI-X enabled
+# Value range is [0,2]. Default value is 0.
 */
-LPFC_ATTR_R(use_msi, 2, 0, 2, "Use Message Signaled Interrupts (1) or "
+LPFC_ATTR_R(use_msi, 0, 0, 2, "Use Message Signaled Interrupts (1) or "
            "MSI-X (2), if possible");
 
+/*
+# lpfc_fcp_imax: Set the maximum number of fast-path FCP interrupts per second
+#
+# Value range is [636,651042]. Default value is 10000.
+*/
+LPFC_ATTR_R(fcp_imax, LPFC_FP_DEF_IMAX, LPFC_MIM_IMAX, LPFC_DMULT_CONST,
+           "Set the maximum number of fast-path FCP interrupts per second");
+
+/*
+# lpfc_fcp_wq_count: Set the number of fast-path FCP work queues
+#
+# Value range is [1,31]. Default value is 4.
+*/
+LPFC_ATTR_R(fcp_wq_count, LPFC_FP_WQN_DEF, LPFC_FP_WQN_MIN, LPFC_FP_WQN_MAX,
+           "Set the number of fast-path FCP work queues, if possible");
+
+/*
+# lpfc_fcp_eq_count: Set the number of fast-path FCP event queues
+#
+# Value range is [1,7]. Default value is 1.
+*/
+LPFC_ATTR_R(fcp_eq_count, LPFC_FP_EQN_DEF, LPFC_FP_EQN_MIN, LPFC_FP_EQN_MAX,
+           "Set the number of fast-path FCP event queues, if possible");
+
 /*
 # lpfc_enable_hba_reset: Allow or prevent HBA resets to the hardware.
 #       0  = HBA resets disabled
@@ -2876,6 +2941,14 @@ LPFC_ATTR_R(enable_hba_heartbeat, 1, 0, 1, "Enable HBA Heartbeat.");
 */
 LPFC_ATTR_R(enable_bg, 0, 0, 1, "Enable BlockGuard Support");
 
+/*
+# lpfc_enable_fip: When set, FIP is required to start discovery. When not
+# set, the driver will add an FCF record manually if the port has no FCF
+# records available, and then start discovery.
+# Value range is [0,1]. Default value is 0 (disabled).
+*/
+LPFC_ATTR_RW(enable_fip, 0, 0, 1, "Enable FIP Discovery");
+
 
 /*
 # lpfc_prot_mask: i
@@ -2942,6 +3015,7 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_peer_port_login,
        &dev_attr_lpfc_nodev_tmo,
        &dev_attr_lpfc_devloss_tmo,
+       &dev_attr_lpfc_enable_fip,
        &dev_attr_lpfc_fcp_class,
        &dev_attr_lpfc_use_adisc,
        &dev_attr_lpfc_ack0,
@@ -2969,6 +3043,9 @@ struct device_attribute *lpfc_hba_attrs[] = {
        &dev_attr_lpfc_poll,
        &dev_attr_lpfc_poll_tmo,
        &dev_attr_lpfc_use_msi,
+       &dev_attr_lpfc_fcp_imax,
+       &dev_attr_lpfc_fcp_wq_count,
+       &dev_attr_lpfc_fcp_eq_count,
        &dev_attr_lpfc_enable_bg,
        &dev_attr_lpfc_soft_wwnn,
        &dev_attr_lpfc_soft_wwpn,
@@ -2991,6 +3068,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
        &dev_attr_lpfc_lun_queue_depth,
        &dev_attr_lpfc_nodev_tmo,
        &dev_attr_lpfc_devloss_tmo,
+       &dev_attr_lpfc_enable_fip,
        &dev_attr_lpfc_hba_queue_depth,
        &dev_attr_lpfc_peer_port_login,
        &dev_attr_lpfc_restrict_login,
@@ -3003,6 +3081,7 @@ struct device_attribute *lpfc_vport_attrs[] = {
        &dev_attr_lpfc_enable_da_id,
        &dev_attr_lpfc_max_scsicmpl_time,
        &dev_attr_lpfc_stat_data_ctrl,
+       &dev_attr_lpfc_static_vport,
        NULL,
 };
 
@@ -3199,7 +3278,7 @@ sysfs_mbox_write(struct kobject *kobj, struct bin_attribute *bin_attr,
                }
        }
 
-       memcpy((uint8_t *) & phba->sysfs_mbox.mbox->mb + off,
+       memcpy((uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off,
               buf, count);
 
        phba->sysfs_mbox.offset = off + count;
@@ -3241,6 +3320,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
        int rc;
+       MAILBOX_t *pmb;
 
        if (off > MAILBOX_CMD_SIZE)
                return -ERANGE;
@@ -3265,8 +3345,8 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
        if (off == 0 &&
            phba->sysfs_mbox.state  == SMBOX_WRITING &&
            phba->sysfs_mbox.offset >= 2 * sizeof(uint32_t)) {
-
-               switch (phba->sysfs_mbox.mbox->mb.mbxCommand) {
+               pmb = &phba->sysfs_mbox.mbox->u.mb;
+               switch (pmb->mbxCommand) {
                        /* Offline only */
                case MBX_INIT_LINK:
                case MBX_DOWN_LINK:
@@ -3283,7 +3363,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                        if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
                                printk(KERN_WARNING "mbox_read:Command 0x%x "
                                       "is illegal in on-line state\n",
-                                      phba->sysfs_mbox.mbox->mb.mbxCommand);
+                                      pmb->mbxCommand);
                                sysfs_mbox_idle(phba);
                                spin_unlock_irq(&phba->hbalock);
                                return -EPERM;
@@ -3319,13 +3399,13 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                case MBX_CONFIG_PORT:
                case MBX_RUN_BIU_DIAG:
                        printk(KERN_WARNING "mbox_read: Illegal Command 0x%x\n",
-                              phba->sysfs_mbox.mbox->mb.mbxCommand);
+                              pmb->mbxCommand);
                        sysfs_mbox_idle(phba);
                        spin_unlock_irq(&phba->hbalock);
                        return -EPERM;
                default:
                        printk(KERN_WARNING "mbox_read: Unknown Command 0x%x\n",
-                              phba->sysfs_mbox.mbox->mb.mbxCommand);
+                              pmb->mbxCommand);
                        sysfs_mbox_idle(phba);
                        spin_unlock_irq(&phba->hbalock);
                        return -EPERM;
@@ -3335,14 +3415,14 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                 * or RESTART mailbox commands until the HBA is restarted.
                 */
                if (phba->pport->stopped &&
-                   phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_DUMP_MEMORY &&
-                   phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_RESTART &&
-                   phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_VPARMS &&
-                   phba->sysfs_mbox.mbox->mb.mbxCommand != MBX_WRITE_WWN)
+                   pmb->mbxCommand != MBX_DUMP_MEMORY &&
+                   pmb->mbxCommand != MBX_RESTART &&
+                   pmb->mbxCommand != MBX_WRITE_VPARMS &&
+                   pmb->mbxCommand != MBX_WRITE_WWN)
                        lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
                                        "1259 mbox: Issued mailbox cmd "
                                        "0x%x while in stopped state.\n",
-                                       phba->sysfs_mbox.mbox->mb.mbxCommand);
+                                       pmb->mbxCommand);
 
                phba->sysfs_mbox.mbox->vport = vport;
 
@@ -3356,7 +3436,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                }
 
                if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-                   (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE))){
+                   (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
 
                        spin_unlock_irq(&phba->hbalock);
                        rc = lpfc_sli_issue_mbox (phba,
@@ -3368,8 +3448,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                        spin_unlock_irq(&phba->hbalock);
                        rc = lpfc_sli_issue_mbox_wait (phba,
                                                       phba->sysfs_mbox.mbox,
-                               lpfc_mbox_tmo_val(phba,
-                                   phba->sysfs_mbox.mbox->mb.mbxCommand) * HZ);
+                               lpfc_mbox_tmo_val(phba, pmb->mbxCommand) * HZ);
                        spin_lock_irq(&phba->hbalock);
                }
 
@@ -3391,7 +3470,7 @@ sysfs_mbox_read(struct kobject *kobj, struct bin_attribute *bin_attr,
                return -EAGAIN;
        }
 
-       memcpy(buf, (uint8_t *) & phba->sysfs_mbox.mbox->mb + off, count);
+       memcpy(buf, (uint8_t *) &phba->sysfs_mbox.mbox->u.mb + off, count);
 
        phba->sysfs_mbox.offset = off + count;
 
@@ -3585,6 +3664,9 @@ lpfc_get_host_speed(struct Scsi_Host *shost)
                        case LA_8GHZ_LINK:
                                fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
                        break;
+                       case LA_10GHZ_LINK:
+                               fc_host_speed(shost) = FC_PORTSPEED_10GBIT;
+                       break;
                        default:
                                fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
                        break;
@@ -3652,7 +3734,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
         */
        if (phba->link_state < LPFC_LINK_DOWN ||
            !phba->mbox_mem_pool ||
-           (phba->sli.sli_flag & LPFC_SLI2_ACTIVE) == 0)
+           (phba->sli.sli_flag & LPFC_SLI_ACTIVE) == 0)
                return NULL;
 
        if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO)
@@ -3663,14 +3745,14 @@ lpfc_get_stats(struct Scsi_Host *shost)
                return NULL;
        memset(pmboxq, 0, sizeof (LPFC_MBOXQ_t));
 
-       pmb = &pmboxq->mb;
+       pmb = &pmboxq->u.mb;
        pmb->mbxCommand = MBX_READ_STATUS;
        pmb->mbxOwner = OWN_HOST;
        pmboxq->context1 = NULL;
        pmboxq->vport = vport;
 
        if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-               (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
        else
                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3695,7 +3777,7 @@ lpfc_get_stats(struct Scsi_Host *shost)
        pmboxq->vport = vport;
 
        if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-           (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
        else
                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3769,7 +3851,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
                return;
        memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
 
-       pmb = &pmboxq->mb;
+       pmb = &pmboxq->u.mb;
        pmb->mbxCommand = MBX_READ_STATUS;
        pmb->mbxOwner = OWN_HOST;
        pmb->un.varWords[0] = 0x1; /* reset request */
@@ -3777,7 +3859,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
        pmboxq->vport = vport;
 
        if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-               (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+               (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
        else
                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3795,7 +3877,7 @@ lpfc_reset_stats(struct Scsi_Host *shost)
        pmboxq->vport = vport;
 
        if ((vport->fc_flag & FC_OFFLINE_MODE) ||
-           (!(psli->sli_flag & LPFC_SLI2_ACTIVE)))
+           (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
                rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
        else
                rc = lpfc_sli_issue_mbox_wait(phba, pmboxq, phba->fc_ratov * 2);
@@ -3962,6 +4044,21 @@ lpfc_set_vport_symbolic_name(struct fc_vport *fc_vport)
                lpfc_ns_cmd(vport, SLI_CTNS_RSPN_ID, 0, 0);
 }
 
+/**
+ * lpfc_hba_log_verbose_init - Set hba's log verbose level
+ * @phba: Pointer to lpfc_hba struct.
+ * @verbose: Verbose logging bit-mask to apply.
+ *
+ * This function is called by the lpfc_get_cfgparam() routine to copy the
+ * module parameter lpfc_log_verbose into the @phba cfg_log_verbose, so that
+ * log messages can be filtered according to the module setting before any
+ * hba port or vport is created.
+ **/
+static void
+lpfc_hba_log_verbose_init(struct lpfc_hba *phba, uint32_t verbose)
+{
+       phba->cfg_log_verbose = verbose;
+}
+
 struct fc_function_template lpfc_transport_functions = {
        /* fixed attributes the driver supports */
        .show_host_node_name = 1,
@@ -4105,6 +4202,9 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        lpfc_poll_tmo_init(phba, lpfc_poll_tmo);
        lpfc_enable_npiv_init(phba, lpfc_enable_npiv);
        lpfc_use_msi_init(phba, lpfc_use_msi);
+       lpfc_fcp_imax_init(phba, lpfc_fcp_imax);
+       lpfc_fcp_wq_count_init(phba, lpfc_fcp_wq_count);
+       lpfc_fcp_eq_count_init(phba, lpfc_fcp_eq_count);
        lpfc_enable_hba_reset_init(phba, lpfc_enable_hba_reset);
        lpfc_enable_hba_heartbeat_init(phba, lpfc_enable_hba_heartbeat);
        lpfc_enable_bg_init(phba, lpfc_enable_bg);
@@ -4113,26 +4213,10 @@ lpfc_get_cfgparam(struct lpfc_hba *phba)
        phba->cfg_soft_wwpn = 0L;
        lpfc_sg_seg_cnt_init(phba, lpfc_sg_seg_cnt);
        lpfc_prot_sg_seg_cnt_init(phba, lpfc_prot_sg_seg_cnt);
-       /*
-        * Since the sg_tablesize is module parameter, the sg_dma_buf_size
-        * used to create the sg_dma_buf_pool must be dynamically calculated.
-        * 2 segments are added since the IOCB needs a command and response bde.
-        */
-       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
-                       sizeof(struct fcp_rsp) +
-                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
-
-       if (phba->cfg_enable_bg) {
-               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
-               phba->cfg_sg_dma_buf_size +=
-                       phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
-       }
-
-       /* Also reinitialize the host templates with new values. */
-       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
-
        lpfc_hba_queue_depth_init(phba, lpfc_hba_queue_depth);
+       lpfc_enable_fip_init(phba, lpfc_enable_fip);
+       lpfc_hba_log_verbose_init(phba, lpfc_log_verbose);
+
        return;
 }
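
The removed sg_dma_buf_size arithmetic does not simply disappear: SLI4 builds scatter lists from SGE entries rather than ulp_bde64 BDEs, so the sizing presumably moves into the per-revision setup paths. A hedged sketch of the SLI3 half of that split (function name assumed; the formula is the one removed above):

    static void lpfc_sli3_set_sg_dma_buf_size(struct lpfc_hba *phba)
    {
            /* FCP command + response + (segments plus 2 BDEs for the
             * command and response) -- unchanged from the old code. */
            phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
                    sizeof(struct fcp_rsp) +
                    ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
    }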
 
index f88ce3f261900c50fce0d33a0abff80ade5d6ad9..d2a922997c0fa17faf05299fd4a85c4894e39e7c 100644 (file)
@@ -23,6 +23,8 @@ typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 struct fc_rport;
 void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
@@ -35,17 +37,19 @@ int lpfc_config_msi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 int lpfc_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *, int);
 void lpfc_read_config(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_read_lnk_stat(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_reg_login(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
-                  LPFC_MBOXQ_t *, uint32_t);
+int lpfc_reg_rpi(struct lpfc_hba *, uint16_t, uint32_t, uint8_t *,
+                LPFC_MBOXQ_t *, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
-void lpfc_reg_vpi(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_unreg_vpi(struct lpfc_hba *, uint16_t, LPFC_MBOXQ_t *);
 void lpfc_init_link(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
+void lpfc_request_features(struct lpfc_hba *, struct lpfcMboxq *);
 
 struct lpfc_vport *lpfc_find_vport_by_did(struct lpfc_hba *, uint32_t);
 void lpfc_cleanup_rpis(struct lpfc_vport *, int);
 int lpfc_linkdown(struct lpfc_hba *);
+void lpfc_linkdown_port(struct lpfc_vport *);
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
@@ -54,6 +58,7 @@ void lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_enqueue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 void lpfc_dequeue_node(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_nodelist *lpfc_enable_node(struct lpfc_vport *,
@@ -105,6 +110,7 @@ int lpfc_issue_els_adisc(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_logo(struct lpfc_vport *, struct lpfc_nodelist *, uint8_t);
 int lpfc_issue_els_npiv_logo(struct lpfc_vport *, struct lpfc_nodelist *);
 int lpfc_issue_els_scr(struct lpfc_vport *, uint32_t, uint8_t);
+int lpfc_issue_fabric_reglogin(struct lpfc_vport *);
 int lpfc_els_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_ct_free_iocb(struct lpfc_hba *, struct lpfc_iocbq *);
 int lpfc_els_rsp_acc(struct lpfc_vport *, uint32_t, struct lpfc_iocbq *,
@@ -149,15 +155,19 @@ int lpfc_online(struct lpfc_hba *);
 void lpfc_unblock_mgmt_io(struct lpfc_hba *);
 void lpfc_offline_prep(struct lpfc_hba *);
 void lpfc_offline(struct lpfc_hba *);
+void lpfc_reset_hba(struct lpfc_hba *);
 
 int lpfc_sli_setup(struct lpfc_hba *);
 int lpfc_sli_queue_setup(struct lpfc_hba *);
 
 void lpfc_handle_eratt(struct lpfc_hba *);
 void lpfc_handle_latt(struct lpfc_hba *);
-irqreturn_t lpfc_intr_handler(int, void *);
-irqreturn_t lpfc_sp_intr_handler(int, void *);
-irqreturn_t lpfc_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_intr_handler(int, void *);
+irqreturn_t lpfc_sli_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli_fp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_sp_intr_handler(int, void *);
+irqreturn_t lpfc_sli4_fp_intr_handler(int, void *);
 
 void lpfc_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_ring(struct lpfc_hba *, int, LPFC_MBOXQ_t *);
@@ -165,16 +175,32 @@ void lpfc_config_port(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_kill_board(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 LPFC_MBOXQ_t *lpfc_mbox_get(struct lpfc_hba *);
+void __lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_mbox_cmpl_put(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_cmd_check(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_mbox_dev_check(struct lpfc_hba *);
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
+void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
+void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
+void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
+void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
+void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
        uint32_t , LPFC_MBOXQ_t *);
 struct hbq_dmabuf *lpfc_els_hbq_alloc(struct lpfc_hba *);
 void lpfc_els_hbq_free(struct lpfc_hba *, struct hbq_dmabuf *);
+struct hbq_dmabuf *lpfc_sli4_rb_alloc(struct lpfc_hba *);
+void lpfc_sli4_rb_free(struct lpfc_hba *, struct hbq_dmabuf *);
+void lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *, struct fcf_record *,
+                       uint16_t);
+void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 
-int lpfc_mem_alloc(struct lpfc_hba *);
+int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
+void lpfc_mem_free_all(struct lpfc_hba *);
 void lpfc_stop_vport_timers(struct lpfc_vport *);
 
 void lpfc_poll_timeout(unsigned long ptr);
@@ -186,6 +212,7 @@ void lpfc_sli_release_iocbq(struct lpfc_hba *, struct lpfc_iocbq *);
 uint16_t lpfc_sli_next_iotag(struct lpfc_hba *, struct lpfc_iocbq *);
 void lpfc_sli_cancel_iocbs(struct lpfc_hba *, struct list_head *, uint32_t,
                           uint32_t);
+void lpfc_sli_wake_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 void lpfc_reset_barrier(struct lpfc_hba * phba);
 int lpfc_sli_brdready(struct lpfc_hba *, uint32_t);
@@ -198,12 +225,13 @@ int lpfc_sli_host_down(struct lpfc_vport *);
 int lpfc_sli_hba_down(struct lpfc_hba *);
 int lpfc_sli_issue_mbox(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 int lpfc_sli_handle_mb_event(struct lpfc_hba *);
-int lpfc_sli_flush_mbox_queue(struct lpfc_hba *);
+void lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *);
 int lpfc_sli_check_eratt(struct lpfc_hba *);
-int lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
+void lpfc_sli_handle_slow_ring_event(struct lpfc_hba *,
                                    struct lpfc_sli_ring *, uint32_t);
+int lpfc_sli4_handle_received_buffer(struct lpfc_hba *);
 void lpfc_sli_def_mbox_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
-int lpfc_sli_issue_iocb(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
                        struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
@@ -237,7 +265,7 @@ struct lpfc_nodelist *lpfc_findnode_wwpn(struct lpfc_vport *,
 
 int lpfc_sli_issue_mbox_wait(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
 
-int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, struct lpfc_sli_ring *,
+int lpfc_sli_issue_iocb_wait(struct lpfc_hba *, uint32_t,
                             struct lpfc_iocbq *, struct lpfc_iocbq *,
                             uint32_t);
 void lpfc_sli_abort_fcp_cmpl(struct lpfc_hba *, struct lpfc_iocbq *,
@@ -254,6 +282,12 @@ void lpfc_in_buf_free(struct lpfc_hba *, struct lpfc_dmabuf *);
 const char* lpfc_info(struct Scsi_Host *);
 int lpfc_scan_finished(struct Scsi_Host *, unsigned long);
 
+int lpfc_init_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_sli_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_scsi_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_mbox_api_table_setup(struct lpfc_hba *, uint8_t);
+int lpfc_api_table_setup(struct lpfc_hba *, uint8_t);
+
 void lpfc_get_cfgparam(struct lpfc_hba *);
 void lpfc_get_vport_cfgparam(struct lpfc_vport *);
 int lpfc_alloc_sysfs_attr(struct lpfc_vport *);
@@ -314,8 +348,15 @@ lpfc_send_els_failure_event(struct lpfc_hba *, struct lpfc_iocbq *,
                                struct lpfc_iocbq *);
 struct lpfc_fast_path_event *lpfc_alloc_fast_evt(struct lpfc_hba *);
 void lpfc_free_fast_evt(struct lpfc_hba *, struct lpfc_fast_path_event *);
+void lpfc_create_static_vport(struct lpfc_hba *);
+void lpfc_stop_hba_timers(struct lpfc_hba *);
+void lpfc_stop_port(struct lpfc_hba *);
+void lpfc_parse_fcoe_conf(struct lpfc_hba *, uint8_t *, uint32_t);
+int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
+void lpfc_start_fdiscs(struct lpfc_hba *phba);
 
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
+
index 896c7b0351e51dcc2c529056442982129d0d02bf..1dbccfd3d022a6339bad81a04f291ec41abe89fb 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -267,8 +269,6 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
             uint32_t tmo, uint8_t retry)
 {
        struct lpfc_hba  *phba = vport->phba;
-       struct lpfc_sli  *psli = &phba->sli;
-       struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
        IOCB_t *icmd;
        struct lpfc_iocbq *geniocb;
        int rc;
@@ -331,7 +331,7 @@ lpfc_gen_req(struct lpfc_vport *vport, struct lpfc_dmabuf *bmp,
        geniocb->drvrTimeout = icmd->ulpTimeout + LPFC_DRVR_TIMEOUT;
        geniocb->vport = vport;
        geniocb->retry = retry;
-       rc = lpfc_sli_issue_iocb(phba, pring, geniocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, geniocb, 0);
 
        if (rc == IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, geniocb);
@@ -1578,6 +1578,9 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp, int cmdcode)
                                case LA_8GHZ_LINK:
                                        ae->un.PortSpeed = HBA_PORTSPEED_8GBIT;
                                break;
+                               case LA_10GHZ_LINK:
+                                       ae->un.PortSpeed = HBA_PORTSPEED_10GBIT;
+                               break;
                                default:
                                        ae->un.PortSpeed =
                                                HBA_PORTSPEED_UNKNOWN;
@@ -1730,7 +1733,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
        uint8_t *fwname;
 
        if (vp->rev.rBit) {
-               if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+               if (psli->sli_flag & LPFC_SLI_ACTIVE)
                        rev = vp->rev.sli2FwRev;
                else
                        rev = vp->rev.sli1FwRev;
@@ -1756,7 +1759,7 @@ lpfc_decode_firmware_rev(struct lpfc_hba *phba, char *fwrevision, int flag)
                }
                b4 = (rev & 0x0000000f);
 
-               if (psli->sli_flag & LPFC_SLI2_ACTIVE)
+               if (psli->sli_flag & LPFC_SLI_ACTIVE)
                        fwname = vp->rev.sli2FwName;
                else
                        fwname = vp->rev.sli1FwName;
index 52be5644e07ad6a9d6538c68a78e6ebe907e3f26..2b02b1fb39a09842a1b18216699ac31b83eef3bb 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2007-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2007-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -280,6 +282,8 @@ lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
        struct lpfc_dmabuf *d_buf;
        struct hbq_dmabuf *hbq_buf;
 
+       if (phba->sli_rev != 3)
+               return 0;
        cnt = LPFC_HBQINFO_SIZE;
        spin_lock_irq(&phba->hbalock);
 
@@ -489,12 +493,15 @@ lpfc_debugfs_dumpHostSlim_data(struct lpfc_hba *phba, char *buf, int size)
                                 pring->next_cmdidx, pring->local_getidx,
                                 pring->flag, pgpp->rspPutInx, pring->numRiocb);
        }
-       word0 = readl(phba->HAregaddr);
-       word1 = readl(phba->CAregaddr);
-       word2 = readl(phba->HSregaddr);
-       word3 = readl(phba->HCregaddr);
-       len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x HC:%08x\n",
-       word0, word1, word2, word3);
+
+       if (phba->sli_rev <= LPFC_SLI_REV3) {
+               word0 = readl(phba->HAregaddr);
+               word1 = readl(phba->CAregaddr);
+               word2 = readl(phba->HSregaddr);
+               word3 = readl(phba->HCregaddr);
+               len +=  snprintf(buf+len, size-len, "HA:%08x CA:%08x HS:%08x "
+                                "HC:%08x\n", word0, word1, word2, word3);
+       }
        spin_unlock_irq(&phba->hbalock);
        return len;
 }
index ffd1089720728a7732a3c8f0707e068138ca71b6..1142070e948424c8f6b678ef5b5b93cc500dfb6b 100644 (file)
@@ -135,6 +135,7 @@ struct lpfc_nodelist {
 #define NLP_NODEV_REMOVE   0x08000000  /* Defer removal till discovery ends */
 #define NLP_TARGET_REMOVE  0x10000000   /* Target remove in process */
 #define NLP_SC_REQ         0x20000000  /* Target requires authentication */
+#define NLP_RPI_VALID      0x80000000  /* nlp_rpi is valid */
 
 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)         (((ndlp)->nlp_usg_map \
index b8b34cf5c3d2fd2129c24a16c9fdb08d12229884..6bdeb14878a2b094eb7f175bea7982cdd337b957 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -84,7 +86,8 @@ lpfc_els_chk_latt(struct lpfc_vport *vport)
        uint32_t ha_copy;
 
        if (vport->port_state >= LPFC_VPORT_READY ||
-           phba->link_state == LPFC_LINK_DOWN)
+           phba->link_state == LPFC_LINK_DOWN ||
+           phba->sli_rev > LPFC_SLI_REV3)
                return 0;
 
        /* Read the HBA Host Attention Register */
@@ -219,7 +222,7 @@ lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
                icmd->un.elsreq64.myID = vport->fc_myDID;
 
                /* For ELS_REQUEST64_CR, use the VPI by default */
-               icmd->ulpContext = vport->vpi;
+               icmd->ulpContext = vport->vpi + phba->vpi_base;
                icmd->ulpCt_h = 0;
                /* The CT field must be 0=INVALID_RPI for the ECHO cmd */
                if (elscmd == ELS_CMD_ECHO)
@@ -305,7 +308,7 @@ els_iocb_free_pcmb_exit:
  *   0 - successfully issued fabric registration login for @vport
  *   -ENXIO -- failed to issue fabric registration login for @vport
  **/
-static int
+int
 lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
 {
        struct lpfc_hba  *phba = vport->phba;
@@ -345,8 +348,7 @@ lpfc_issue_fabric_reglogin(struct lpfc_vport *vport)
                err = 4;
                goto fail;
        }
-       rc = lpfc_reg_login(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox,
-                           0);
+       rc = lpfc_reg_rpi(phba, vport->vpi, Fabric_DID, (uint8_t *)sp, mbox, 0);
        if (rc) {
                err = 5;
                goto fail_free_mbox;
@@ -385,6 +387,75 @@ fail:
        return -ENXIO;
 }
 
+/**
+ * lpfc_issue_reg_vfi - Register VFI for this vport's fabric login
+ * @vport: pointer to a host virtual N_Port data structure.
+ *
+ * This routine issues a REG_VFI mailbox command to register the vfi, vpi,
+ * fcfi triplet for @vport. This mailbox command is necessary for FCoE only.
+ *
+ * Return code
+ *   0 - successfully issued REG_VFI for @vport
+ *   A failure code otherwise.
+ **/
+static int
+lpfc_issue_reg_vfi(struct lpfc_vport *vport)
+{
+       struct lpfc_hba  *phba = vport->phba;
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_nodelist *ndlp;
+       struct serv_parm *sp;
+       struct lpfc_dmabuf *dmabuf;
+       int rc = 0;
+
+       sp = &phba->fc_fabparam;
+       ndlp = lpfc_findnode_did(vport, Fabric_DID);
+       if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
+               rc = -ENODEV;
+               goto fail;
+       }
+
+       dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!dmabuf) {
+               rc = -ENOMEM;
+               goto fail;
+       }
+       dmabuf->virt = lpfc_mbuf_alloc(phba, MEM_PRI, &dmabuf->phys);
+       if (!dmabuf->virt) {
+               rc = -ENOMEM;
+               goto fail_free_dmabuf;
+       }
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               rc = -ENOMEM;
+               goto fail_free_coherent;
+       }
+       vport->port_state = LPFC_FABRIC_CFG_LINK;
+       memcpy(dmabuf->virt, &phba->fc_fabparam, sizeof(vport->fc_sparam));
+       lpfc_reg_vfi(mboxq, vport, dmabuf->phys);
+       mboxq->mbox_cmpl = lpfc_mbx_cmpl_reg_vfi;
+       mboxq->vport = vport;
+       mboxq->context1 = dmabuf;
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               rc = -ENXIO;
+               goto fail_free_mbox;
+       }
+       return 0;
+
+fail_free_mbox:
+       mempool_free(mboxq, phba->mbox_mem_pool);
+fail_free_coherent:
+       lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+fail_free_dmabuf:
+       kfree(dmabuf);
+fail:
+       lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+       lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+               "0289 Issue Register VFI failed: Err %d\n", rc);
+       return rc;
+}
+
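Editor's note: the error path above uses the kernel's reverse-order goto-unwind idiom — each acquisition gets a label, and a failure releases only what was acquired before it. A minimal standalone sketch of the same shape (generic names and plain malloc/free, purely illustrative, not driver code):

	#include <stdlib.h>

	/* Sketch: acquire a, b, c in order; on failure unwind in reverse. */
	int setup_three(void **pa, void **pb, void **pc)
	{
		void *a, *b, *c;

		a = malloc(16);
		if (!a)
			goto fail;
		b = malloc(16);
		if (!b)
			goto fail_free_a;
		c = malloc(16);
		if (!c)
			goto fail_free_b;
		*pa = a; *pb = b; *pc = c;	/* ownership passes to the caller */
		return 0;

	fail_free_b:
		free(b);
	fail_free_a:
		free(a);
	fail:
		return -1;
	}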
 /**
  * lpfc_cmpl_els_flogi_fabric - Completion function for flogi to a fabric port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -497,17 +568,24 @@ lpfc_cmpl_els_flogi_fabric(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                }
        }
 
-       lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
-
-       if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
-           vport->fc_flag & FC_VPORT_NEEDS_REG_VPI) {
-               lpfc_register_new_vport(phba, vport, ndlp);
-               return 0;
+       if (phba->sli_rev < LPFC_SLI_REV4) {
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_REG_LOGIN_ISSUE);
+               if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED &&
+                   vport->fc_flag & FC_VPORT_NEEDS_REG_VPI)
+                       lpfc_register_new_vport(phba, vport, ndlp);
+               else
+                       lpfc_issue_fabric_reglogin(vport);
+       } else {
+               ndlp->nlp_type |= NLP_FABRIC;
+               lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
+               if (vport->vfi_state & LPFC_VFI_REGISTERED) {
+                       lpfc_start_fdiscs(phba);
+                       lpfc_do_scr_ns_plogi(phba, vport);
+               } else
+                       lpfc_issue_reg_vfi(vport);
        }
-       lpfc_issue_fabric_reglogin(vport);
        return 0;
 }
-
 /**
  * lpfc_cmpl_els_flogi_nport - Completion function for flogi to an N_Port
  * @vport: pointer to a host virtual N_Port data structure.
@@ -815,9 +893,14 @@ lpfc_issue_els_flogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (sp->cmn.fcphHigh < FC_PH3)
                sp->cmn.fcphHigh = FC_PH3;
 
-       if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
+       if  (phba->sli_rev == LPFC_SLI_REV4) {
+               elsiocb->iocb.ulpCt_h = ((SLI4_CT_FCFI >> 1) & 1);
+               elsiocb->iocb.ulpCt_l = (SLI4_CT_FCFI & 1);
+               /* FLOGI needs to be 3 for WQE FCFI */
+               /* Set the fcfi to the fcfi we registered with */
+               elsiocb->iocb.ulpContext = phba->fcf.fcfi;
+       } else if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
                sp->cmn.request_multiple_Nport = 1;
-
                /* For FLOGI, Let FLOGI rsp set the NPortID for VPI 0 */
                icmd->ulpCt_h = 1;
                icmd->ulpCt_l = 0;
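Editor's note on the SLI4 branch above: the two ulpCt bits together carry a 2-bit context-tag type, so the value 3 (which the in-line comment says SLI4_CT_FCFI must be for a WQE FCFI) is split across the high- and low-bit fields. A tiny standalone illustration (values only; the field names are the driver's):

	#include <stdio.h>

	int main(void)
	{
		unsigned ct = 3;		/* SLI4_CT_FCFI per the comment above */
		unsigned ct_h = (ct >> 1) & 1;	/* goes to iocb.ulpCt_h */
		unsigned ct_l = ct & 1;		/* goes to iocb.ulpCt_l */

		printf("ct=%u -> ulpCt_h=%u ulpCt_l=%u\n", ct, ct_h, ct_l);
		return 0;			/* prints: ct=3 -> ulpCt_h=1 ulpCt_l=1 */
	}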
@@ -930,6 +1013,8 @@ lpfc_initial_flogi(struct lpfc_vport *vport)
                if (!ndlp)
                        return 0;
                lpfc_nlp_init(vport, ndlp, Fabric_DID);
+               /* Set the node type */
+               ndlp->nlp_type |= NLP_FABRIC;
                /* Put ndlp onto node list */
                lpfc_enqueue_node(vport, ndlp);
        } else if (!NLP_CHK_NODE_ACT(ndlp)) {
@@ -1350,14 +1435,12 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
        IOCB_t *icmd;
        struct lpfc_nodelist *ndlp;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        int ret;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
 
        ndlp = lpfc_findnode_did(vport, did);
        if (ndlp && !NLP_CHK_NODE_ACT(ndlp))
@@ -1391,7 +1474,7 @@ lpfc_issue_els_plogi(struct lpfc_vport *vport, uint32_t did, uint8_t retry)
 
        phba->fc_stat.elsXmitPLOGI++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_plogi;
-       ret = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
        if (ret == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
@@ -1501,14 +1584,9 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        PRLI *npr;
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
-       struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
 
-       psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
-
        cmdsize = (sizeof(uint32_t) + sizeof(PRLI));
        elsiocb = lpfc_prep_els_iocb(vport, 1, cmdsize, retry, ndlp,
                                     ndlp->nlp_DID, ELS_CMD_PRLI);
@@ -1550,7 +1628,8 @@ lpfc_issue_els_prli(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_PRLI_SND;
        spin_unlock_irq(shost->host_lock);
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+           IOCB_ERROR) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_PRLI_SND;
                spin_unlock_irq(shost->host_lock);
@@ -1608,7 +1687,8 @@ lpfc_adisc_done(struct lpfc_vport *vport)
         * and continue discovery.
         */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
-           !(vport->fc_flag & FC_RSCN_MODE)) {
+           !(vport->fc_flag & FC_RSCN_MODE) &&
+           (phba->sli_rev < LPFC_SLI_REV4)) {
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
@@ -1788,8 +1868,6 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        ADISC *ap;
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
        uint8_t *pcmd;
        uint16_t cmdsize;
 
@@ -1822,7 +1900,8 @@ lpfc_issue_els_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_ADISC_SND;
        spin_unlock_irq(shost->host_lock);
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+           IOCB_ERROR) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_ADISC_SND;
                spin_unlock_irq(shost->host_lock);
@@ -1937,15 +2016,10 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        struct lpfc_hba  *phba = vport->phba;
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
-       struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        int rc;
 
-       psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];
-
        spin_lock_irq(shost->host_lock);
        if (ndlp->nlp_flag & NLP_LOGO_SND) {
                spin_unlock_irq(shost->host_lock);
@@ -1978,7 +2052,7 @@ lpfc_issue_els_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_LOGO_SND;
        spin_unlock_irq(shost->host_lock);
-       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
        if (rc == IOCB_ERROR) {
                spin_lock_irq(shost->host_lock);
@@ -2058,14 +2132,12 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
        struct lpfc_hba  *phba = vport->phba;
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        struct lpfc_nodelist *ndlp;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
        cmdsize = (sizeof(uint32_t) + sizeof(SCR));
 
        ndlp = lpfc_findnode_did(vport, nportid);
@@ -2108,7 +2180,8 @@ lpfc_issue_els_scr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
        phba->fc_stat.elsXmitSCR++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+           IOCB_ERROR) {
                /* The additional lpfc_nlp_put will cause the following
                 * lpfc_els_free_iocb routine to trigger the release of
                 * the node.
@@ -2152,7 +2225,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
        struct lpfc_hba  *phba = vport->phba;
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        FARP *fp;
        uint8_t *pcmd;
@@ -2162,7 +2234,6 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
        struct lpfc_nodelist *ndlp;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
        cmdsize = (sizeof(uint32_t) + sizeof(FARP));
 
        ndlp = lpfc_findnode_did(vport, nportid);
@@ -2219,7 +2290,8 @@ lpfc_issue_els_farpr(struct lpfc_vport *vport, uint32_t nportid, uint8_t retry)
 
        phba->fc_stat.elsXmitFARPR++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_cmd;
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+           IOCB_ERROR) {
                /* The additional lpfc_nlp_put will cause the following
                 * lpfc_els_free_iocb routine to trigger the release of
                 * the node.
@@ -2949,6 +3021,14 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
 
+       /*
+        * This routine is used to register and unregister in previous SLI
+        * modes.
+        */
+       if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+           (phba->sli_rev == LPFC_SLI_REV4))
+               lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
        pmb->context1 = NULL;
        lpfc_mbuf_free(phba, mp->virt, mp->phys);
        kfree(mp);
@@ -2961,6 +3041,7 @@ lpfc_mbx_cmpl_dflt_rpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                 */
                lpfc_nlp_not_used(ndlp);
        }
+
        return;
 }
 
@@ -3170,7 +3251,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
        IOCB_t *icmd;
        IOCB_t *oldcmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
@@ -3178,7 +3258,6 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
        ELS_PKT *els_pkt_ptr;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
        oldcmd = &oldiocb->iocb;
 
        switch (flag) {
@@ -3266,7 +3345,7 @@ lpfc_els_rsp_acc(struct lpfc_vport *vport, uint32_t flag,
        }
 
        phba->fc_stat.elsXmitACC++;
-       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
@@ -3305,15 +3384,12 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
        IOCB_t *icmd;
        IOCB_t *oldcmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        int rc;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
-
        cmdsize = 2 * sizeof(uint32_t);
        elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
                                     ndlp->nlp_DID, ELS_CMD_LS_RJT);
@@ -3346,7 +3422,7 @@ lpfc_els_rsp_reject(struct lpfc_vport *vport, uint32_t rejectError,
 
        phba->fc_stat.elsXmitLSRJT++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
 
        if (rc == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
@@ -3379,8 +3455,6 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
                       struct lpfc_nodelist *ndlp)
 {
        struct lpfc_hba  *phba = vport->phba;
-       struct lpfc_sli  *psli = &phba->sli;
-       struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
        ADISC *ap;
        IOCB_t *icmd, *oldcmd;
        struct lpfc_iocbq *elsiocb;
@@ -3422,7 +3496,7 @@ lpfc_els_rsp_adisc_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
 
        phba->fc_stat.elsXmitACC++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
-       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
@@ -3459,14 +3533,12 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
        IOCB_t *icmd;
        IOCB_t *oldcmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        int rc;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];     /* ELS ring */
 
        cmdsize = sizeof(uint32_t) + sizeof(PRLI);
        elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -3520,7 +3592,7 @@ lpfc_els_rsp_prli_acc(struct lpfc_vport *vport, struct lpfc_iocbq *oldiocb,
        phba->fc_stat.elsXmitACC++;
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
 
-       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
@@ -3562,15 +3634,12 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
        RNID *rn;
        IOCB_t *icmd, *oldcmd;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli_ring *pring;
        struct lpfc_sli *psli;
        uint8_t *pcmd;
        uint16_t cmdsize;
        int rc;
 
        psli = &phba->sli;
-       pring = &psli->ring[LPFC_ELS_RING];
-
        cmdsize = sizeof(uint32_t) + sizeof(uint32_t)
                                        + (2 * sizeof(struct lpfc_name));
        if (format)
@@ -3626,7 +3695,7 @@ lpfc_els_rsp_rnid_acc(struct lpfc_vport *vport, uint8_t format,
        elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
                                    * it could be freed */
 
-       rc = lpfc_sli_issue_iocb(phba, pring, elsiocb, 0);
+       rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
        if (rc == IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
@@ -3839,7 +3908,9 @@ lpfc_rscn_payload_check(struct lpfc_vport *vport, uint32_t did)
                        payload_len -= sizeof(uint32_t);
                        switch (rscn_did.un.b.resv & RSCN_ADDRESS_FORMAT_MASK) {
                        case RSCN_ADDRESS_FORMAT_PORT:
-                               if (ns_did.un.word == rscn_did.un.word)
+                               if ((ns_did.un.b.domain == rscn_did.un.b.domain)
+                                   && (ns_did.un.b.area == rscn_did.un.b.area)
+                                   && (ns_did.un.b.id == rscn_did.un.b.id))
                                        goto return_did_out;
                                break;
                        case RSCN_ADDRESS_FORMAT_AREA:
@@ -4300,7 +4371,7 @@ lpfc_els_rcv_flogi(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                        lpfc_init_link(phba, mbox,
                                       phba->cfg_topology,
                                       phba->cfg_link_speed);
-                       mbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+                       mbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
                        mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                        mbox->vport = vport;
                        rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
@@ -4440,8 +4511,6 @@ lpfc_els_rcv_lirr(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
 static void
 lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
        MAILBOX_t *mb;
        IOCB_t *icmd;
        RPS_RSP *rps_rsp;
@@ -4451,7 +4520,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        uint16_t xri, status;
        uint32_t cmdsize;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
 
        ndlp = (struct lpfc_nodelist *) pmb->context2;
        xri = (uint16_t) ((unsigned long)(pmb->context1));
@@ -4507,7 +4576,7 @@ lpfc_els_rsp_rps_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                         ndlp->nlp_rpi);
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR)
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
                lpfc_els_free_iocb(phba, elsiocb);
        return;
 }
@@ -4616,8 +4685,6 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
        IOCB_t *icmd, *oldcmd;
        RPL_RSP rpl_rsp;
        struct lpfc_iocbq *elsiocb;
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
        uint8_t *pcmd;
 
        elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
@@ -4654,7 +4721,8 @@ lpfc_els_rsp_rpl_acc(struct lpfc_vport *vport, uint16_t cmdsize,
                         ndlp->nlp_rpi);
        elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
        phba->fc_stat.elsXmitACC++;
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+           IOCB_ERROR) {
                lpfc_els_free_iocb(phba, elsiocb);
                return 1;
        }
@@ -4883,7 +4951,10 @@ lpfc_els_rcv_fan(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
                } else {
                        /* FAN verified - skip FLOGI */
                        vport->fc_myDID = vport->fc_prevDID;
-                       lpfc_issue_fabric_reglogin(vport);
+                       if (phba->sli_rev < LPFC_SLI_REV4)
+                               lpfc_issue_fabric_reglogin(vport);
+                       else
+                               lpfc_issue_reg_vfi(vport);
                }
        }
        return 0;
@@ -5566,11 +5637,10 @@ lpfc_els_unsol_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 
 dropit:
        if (vport && !(vport->load_flag & FC_UNLOADING))
-               lpfc_printf_log(phba, KERN_ERR, LOG_ELS,
-                       "(%d):0111 Dropping received ELS cmd "
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                       "0111 Dropping received ELS cmd "
                        "Data: x%x x%x x%x\n",
-                       vport->vpi, icmd->ulpStatus,
-                       icmd->un.ulpWord[4], icmd->ulpTimeout);
+                       icmd->ulpStatus, icmd->un.ulpWord[4], icmd->ulpTimeout);
        phba->fc_stat.elsRcvDrop++;
 }
 
@@ -5646,10 +5716,9 @@ lpfc_els_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
             icmd->ulpCommand == CMD_IOCB_RCV_SEQ64_CX)) {
                if (icmd->unsli3.rcvsli3.vpi == 0xffff)
                        vport = phba->pport;
-               else {
-                       uint16_t vpi = icmd->unsli3.rcvsli3.vpi;
-                       vport = lpfc_find_vport_by_vpid(phba, vpi);
-               }
+               else
+                       vport = lpfc_find_vport_by_vpid(phba,
+                               icmd->unsli3.rcvsli3.vpi - phba->vpi_base);
        }
        /* If there are no BDEs associated
         * with this IOCB, there is nothing to do.
@@ -5781,7 +5850,7 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
 
        spin_lock_irq(shost->host_lock);
        vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
@@ -5818,7 +5887,10 @@ lpfc_cmpl_reg_new_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        } else {
                if (vport == phba->pport)
-                       lpfc_issue_fabric_reglogin(vport);
+                       if (phba->sli_rev < LPFC_SLI_REV4)
+                               lpfc_issue_fabric_reglogin(vport);
+                       else
+                               lpfc_issue_reg_vfi(vport);
                else
                        lpfc_do_scr_ns_plogi(phba, vport);
        }
@@ -5850,7 +5922,7 @@ lpfc_register_new_vport(struct lpfc_hba *phba, struct lpfc_vport *vport,
 
        mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (mbox) {
-               lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, mbox);
+               lpfc_reg_vpi(vport, mbox);
                mbox->vport = vport;
                mbox->context2 = lpfc_nlp_get(ndlp);
                mbox->mbox_cmpl = lpfc_cmpl_reg_new_vport;
@@ -6139,7 +6211,6 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        struct lpfc_hba  *phba = vport->phba;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
        IOCB_t *icmd;
        struct lpfc_iocbq *elsiocb;
        uint8_t *pcmd;
@@ -6169,7 +6240,8 @@ lpfc_issue_els_npiv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        spin_lock_irq(shost->host_lock);
        ndlp->nlp_flag |= NLP_LOGO_SND;
        spin_unlock_irq(shost->host_lock);
-       if (lpfc_sli_issue_iocb(phba, pring, elsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) ==
+           IOCB_ERROR) {
                spin_lock_irq(shost->host_lock);
                ndlp->nlp_flag &= ~NLP_LOGO_SND;
                spin_unlock_irq(shost->host_lock);
@@ -6224,7 +6296,6 @@ lpfc_resume_fabric_iocbs(struct lpfc_hba *phba)
        struct lpfc_iocbq *iocb;
        unsigned long iflags;
        int ret;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
        IOCB_t *cmd;
 
 repeat:
@@ -6248,7 +6319,7 @@ repeat:
                        "Fabric sched1:   ste:x%x",
                        iocb->vport->port_state, 0, 0);
 
-               ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+               ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
                if (ret == IOCB_ERROR) {
                        iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6394,7 +6465,6 @@ static int
 lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
 {
        unsigned long iflags;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
        int ready;
        int ret;
 
@@ -6418,7 +6488,7 @@ lpfc_issue_fabric_iocb(struct lpfc_hba *phba, struct lpfc_iocbq *iocb)
                        "Fabric sched2:   ste:x%x",
                        iocb->vport->port_state, 0, 0);
 
-               ret = lpfc_sli_issue_iocb(phba, pring, iocb, 0);
+               ret = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, iocb, 0);
 
                if (ret == IOCB_ERROR) {
                        iocb->iocb_cmpl = iocb->fabric_iocb_cmpl;
@@ -6524,3 +6594,38 @@ void lpfc_fabric_abort_hba(struct lpfc_hba *phba)
        lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
                              IOERR_SLI_ABORTED);
 }
+
+/**
+ * lpfc_sli4_els_xri_aborted - Slow-path process of els xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the els xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 slow-path
+ * ELS aborted xri.
+ **/
+void
+lpfc_sli4_els_xri_aborted(struct lpfc_hba *phba,
+                         struct sli4_wcqe_xri_aborted *axri)
+{
+       uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+       struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+       unsigned long iflag = 0;
+
+       spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+       list_for_each_entry_safe(sglq_entry, sglq_next,
+                       &phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
+               if (sglq_entry->sli4_xritag == xri) {
+                       list_del(&sglq_entry->list);
+                       spin_unlock_irqrestore(
+                                       &phba->sli4_hba.abts_sgl_list_lock,
+                                        iflag);
+                       spin_lock_irqsave(&phba->hbalock, iflag);
+
+                       list_add_tail(&sglq_entry->list,
+                               &phba->sli4_hba.lpfc_sgl_list);
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
+                       return;
+               }
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+}
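Editor's note: the routine above never holds both locks at once — the sglq is unlinked under abts_sgl_list_lock, that lock is dropped, and the entry is re-added under hbalock. The same handoff shape, reduced to a userspace sketch (hypothetical list and lock names, pthread mutexes standing in for spinlocks):

	#include <pthread.h>
	#include <stddef.h>

	struct node { struct node *next; int tag; };

	static struct node *abts_list;	/* nodes whose XRI was aborted */
	static struct node *free_list;	/* nodes ready for reuse */
	static pthread_mutex_t abts_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t free_lock = PTHREAD_MUTEX_INITIALIZER;

	/* Sketch: move the node matching @tag from the aborted list to the
	 * free list, never holding both locks at the same time. */
	void recycle(int tag)
	{
		struct node **pp, *n = NULL;

		pthread_mutex_lock(&abts_lock);
		for (pp = &abts_list; *pp; pp = &(*pp)->next)
			if ((*pp)->tag == tag) {
				n = *pp;
				*pp = n->next;	/* unlink under abts_lock */
				break;
			}
		pthread_mutex_unlock(&abts_lock);
		if (!n)
			return;
		pthread_mutex_lock(&free_lock);
		n->next = free_list;		/* re-add under free_lock */
		free_list = n;
		pthread_mutex_unlock(&free_lock);
	}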
index e764ce0bf7049e2f074bc8d00e1b8d329504e489..35c41ae75be248a15f7344734f1b77210b99bedf 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_scsi.h"
 #include "lpfc.h"
 #include "lpfc_logmsg.h"
@@ -273,6 +275,8 @@ lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
            !(ndlp->nlp_flag & NLP_NPR_2B_DISC) &&
            (ndlp->nlp_state != NLP_STE_UNMAPPED_NODE))
                lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
+
+       lpfc_unregister_unused_fcf(phba);
 }
 
 /**
@@ -295,10 +299,11 @@ lpfc_alloc_fast_evt(struct lpfc_hba *phba) {
 
        ret = kzalloc(sizeof(struct lpfc_fast_path_event),
                        GFP_ATOMIC);
-       if (ret)
+       if (ret) {
                atomic_inc(&phba->fast_event_count);
-       INIT_LIST_HEAD(&ret->work_evt.evt_listp);
-       ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+               INIT_LIST_HEAD(&ret->work_evt.evt_listp);
+               ret->work_evt.evt = LPFC_EVT_FASTPATH_MGMT_EVT;
+       }
        return ret;
 }
 
@@ -491,6 +496,10 @@ lpfc_work_done(struct lpfc_hba *phba)
        phba->work_ha = 0;
        spin_unlock_irq(&phba->hbalock);
 
+       /* First, try to post the next mailbox command to SLI4 device */
+       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+               lpfc_sli4_post_async_mbox(phba);
+
        if (ha_copy & HA_ERATT)
                /* Handle the error attention event */
                lpfc_handle_eratt(phba);
@@ -501,9 +510,27 @@ lpfc_work_done(struct lpfc_hba *phba)
        if (ha_copy & HA_LATT)
                lpfc_handle_latt(phba);
 
+       /* Process SLI4 events */
+       if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+               if (phba->hba_flag & FCP_XRI_ABORT_EVENT)
+                       lpfc_sli4_fcp_xri_abort_event_proc(phba);
+               if (phba->hba_flag & ELS_XRI_ABORT_EVENT)
+                       lpfc_sli4_els_xri_abort_event_proc(phba);
+               if (phba->hba_flag & ASYNC_EVENT)
+                       lpfc_sli4_async_event_proc(phba);
+               if (phba->hba_flag & HBA_POST_RECEIVE_BUFFER) {
+                       spin_lock_irq(&phba->hbalock);
+                       phba->hba_flag &= ~HBA_POST_RECEIVE_BUFFER;
+                       spin_unlock_irq(&phba->hbalock);
+                       lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
+               }
+               if (phba->hba_flag & HBA_RECEIVE_BUFFER)
+                       lpfc_sli4_handle_received_buffer(phba);
+       }
+
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi; i++) {
+               for (i = 0; i <= phba->max_vports; i++) {
                        /*
                         * We could have no vports in array if unloading, so if
                         * this happens then just use the pport
@@ -555,23 +582,24 @@ lpfc_work_done(struct lpfc_hba *phba)
                /*
                 * Turn on Ring interrupts
                 */
-               spin_lock_irq(&phba->hbalock);
-               control = readl(phba->HCregaddr);
-               if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
-                       lpfc_debugfs_slow_ring_trc(phba,
-                               "WRK Enable ring: cntl:x%x hacopy:x%x",
-                               control, ha_copy, 0);
-
-                       control |= (HC_R0INT_ENA << LPFC_ELS_RING);
-                       writel(control, phba->HCregaddr);
-                       readl(phba->HCregaddr); /* flush */
-               }
-               else {
-                       lpfc_debugfs_slow_ring_trc(phba,
-                               "WRK Ring ok:     cntl:x%x hacopy:x%x",
-                               control, ha_copy, 0);
+               if (phba->sli_rev <= LPFC_SLI_REV3) {
+                       spin_lock_irq(&phba->hbalock);
+                       control = readl(phba->HCregaddr);
+                       if (!(control & (HC_R0INT_ENA << LPFC_ELS_RING))) {
+                               lpfc_debugfs_slow_ring_trc(phba,
+                                       "WRK Enable ring: cntl:x%x hacopy:x%x",
+                                       control, ha_copy, 0);
+
+                               control |= (HC_R0INT_ENA << LPFC_ELS_RING);
+                               writel(control, phba->HCregaddr);
+                               readl(phba->HCregaddr); /* flush */
+                       } else {
+                               lpfc_debugfs_slow_ring_trc(phba,
+                                       "WRK Ring ok:     cntl:x%x hacopy:x%x",
+                                       control, ha_copy, 0);
+                       }
+                       spin_unlock_irq(&phba->hbalock);
                }
-               spin_unlock_irq(&phba->hbalock);
        }
        lpfc_work_list_done(phba);
 }
@@ -689,7 +717,7 @@ lpfc_port_link_failure(struct lpfc_vport *vport)
        lpfc_can_disctmo(vport);
 }
 
-static void
+void
 lpfc_linkdown_port(struct lpfc_vport *vport)
 {
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
@@ -716,6 +744,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
        if (phba->link_state == LPFC_LINK_DOWN)
                return 0;
        spin_lock_irq(&phba->hbalock);
+       phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_DISCOVERED);
        if (phba->link_state > LPFC_LINK_DOWN) {
                phba->link_state = LPFC_LINK_DOWN;
                phba->pport->fc_flag &= ~FC_LBIT;
@@ -723,7 +752,7 @@ lpfc_linkdown(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        /* Issue a LINK DOWN event to all nodes */
                        lpfc_linkdown_port(vports[i]);
                }
@@ -833,10 +862,11 @@ lpfc_linkup(struct lpfc_hba *phba)
 
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_linkup_port(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
-       if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+       if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
+           (phba->sli_rev < LPFC_SLI_REV4))
                lpfc_issue_clear_la(phba, phba->pport);
 
        return 0;
@@ -854,7 +884,7 @@ lpfc_mbx_cmpl_clear_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        struct lpfc_sli   *psli = &phba->sli;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        uint32_t control;
 
        /* Since we don't do discovery right now, turn these off here */
@@ -917,7 +947,7 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
 
-       if (pmb->mb.mbxStatus)
+       if (pmb->u.mb.mbxStatus)
                goto out;
 
        mempool_free(pmb, phba->mbox_mem_pool);
@@ -945,7 +975,7 @@ out:
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
                         "0306 CONFIG_LINK mbxStatus error x%x "
                         "HBA state x%x\n",
-                        pmb->mb.mbxStatus, vport->port_state);
+                        pmb->u.mb.mbxStatus, vport->port_state);
        mempool_free(pmb, phba->mbox_mem_pool);
 
        lpfc_linkdown(phba);
@@ -958,10 +988,593 @@ out:
        return;
 }
 
+static void
+lpfc_mbx_cmpl_reg_fcfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       struct lpfc_vport *vport = mboxq->vport;
+       unsigned long flags;
+
+       if (mboxq->u.mb.mbxStatus) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                        "2017 REG_FCFI mbxStatus error x%x "
+                        "HBA state x%x\n",
+                        mboxq->u.mb.mbxStatus, vport->port_state);
+               mempool_free(mboxq, phba->mbox_mem_pool);
+               return;
+       }
+
+       /* Start FCoE discovery by sending a FLOGI. */
+       phba->fcf.fcfi = bf_get(lpfc_reg_fcfi_fcfi, &mboxq->u.mqe.un.reg_fcfi);
+       /* Set the FCFI registered flag */
+       spin_lock_irqsave(&phba->hbalock, flags);
+       phba->fcf.fcf_flag |= FCF_REGISTERED;
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+       if (vport->port_state != LPFC_FLOGI) {
+               spin_lock_irqsave(&phba->hbalock, flags);
+               phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               lpfc_initial_flogi(vport);
+       }
+
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return;
+}
+
+/**
+ * lpfc_fab_name_match - Check if the fcf fabric name matches.
+ * @fab_name: pointer to fabric name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's fabric name with the provided
+ * fabric name. If the fabric names are identical this function
+ * returns 1, otherwise it returns 0.
+ **/
+static uint32_t
+lpfc_fab_name_match(uint8_t *fab_name, struct fcf_record *new_fcf_record)
+{
+       if ((fab_name[0] ==
+               bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record)) &&
+           (fab_name[1] ==
+               bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record)) &&
+           (fab_name[2] ==
+               bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record)) &&
+           (fab_name[3] ==
+               bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record)) &&
+           (fab_name[4] ==
+               bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record)) &&
+           (fab_name[5] ==
+               bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record)) &&
+           (fab_name[6] ==
+               bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record)) &&
+           (fab_name[7] ==
+               bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record)))
+               return 1;
+       else
+               return 0;
+}
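Editor's note: the eight byte-wise compares above amount to an 8-byte fabric-name (WWN) equality test, which is easier to see once the bytes are gathered into arrays. A sketch under that assumption (hypothetical helper, not driver code):

	#include <stdint.h>
	#include <string.h>

	/* Sketch: with the fab_name bytes extracted from the FCF record
	 * (the driver pulls them one at a time with bf_get()), the match
	 * collapses to a single memcmp. */
	static int fab_name_bytes_match(const uint8_t got[8], const uint8_t want[8])
	{
		return memcmp(got, want, 8) == 0;
	}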
+
+/**
+ * lpfc_mac_addr_match - Check if the fcf mac address matches.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's mac address with the HBA's
+ * FCF mac address. If the mac addresses are identical this function
+ * returns 1, otherwise it returns 0.
+ **/
+static uint32_t
+lpfc_mac_addr_match(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+{
+       if ((phba->fcf.mac_addr[0] ==
+               bf_get(lpfc_fcf_record_mac_0, new_fcf_record)) &&
+           (phba->fcf.mac_addr[1] ==
+               bf_get(lpfc_fcf_record_mac_1, new_fcf_record)) &&
+           (phba->fcf.mac_addr[2] ==
+               bf_get(lpfc_fcf_record_mac_2, new_fcf_record)) &&
+           (phba->fcf.mac_addr[3] ==
+               bf_get(lpfc_fcf_record_mac_3, new_fcf_record)) &&
+           (phba->fcf.mac_addr[4] ==
+               bf_get(lpfc_fcf_record_mac_4, new_fcf_record)) &&
+           (phba->fcf.mac_addr[5] ==
+               bf_get(lpfc_fcf_record_mac_5, new_fcf_record)))
+               return 1;
+       else
+               return 0;
+}
+
+/**
+ * lpfc_copy_fcf_record - Copy fcf information to lpfc_hba.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine copies the FCF information from the FCF
+ * record to lpfc_hba data structure.
+ **/
+static void
+lpfc_copy_fcf_record(struct lpfc_hba *phba, struct fcf_record *new_fcf_record)
+{
+       phba->fcf.fabric_name[0] =
+               bf_get(lpfc_fcf_record_fab_name_0, new_fcf_record);
+       phba->fcf.fabric_name[1] =
+               bf_get(lpfc_fcf_record_fab_name_1, new_fcf_record);
+       phba->fcf.fabric_name[2] =
+               bf_get(lpfc_fcf_record_fab_name_2, new_fcf_record);
+       phba->fcf.fabric_name[3] =
+               bf_get(lpfc_fcf_record_fab_name_3, new_fcf_record);
+       phba->fcf.fabric_name[4] =
+               bf_get(lpfc_fcf_record_fab_name_4, new_fcf_record);
+       phba->fcf.fabric_name[5] =
+               bf_get(lpfc_fcf_record_fab_name_5, new_fcf_record);
+       phba->fcf.fabric_name[6] =
+               bf_get(lpfc_fcf_record_fab_name_6, new_fcf_record);
+       phba->fcf.fabric_name[7] =
+               bf_get(lpfc_fcf_record_fab_name_7, new_fcf_record);
+       phba->fcf.mac_addr[0] =
+               bf_get(lpfc_fcf_record_mac_0, new_fcf_record);
+       phba->fcf.mac_addr[1] =
+               bf_get(lpfc_fcf_record_mac_1, new_fcf_record);
+       phba->fcf.mac_addr[2] =
+               bf_get(lpfc_fcf_record_mac_2, new_fcf_record);
+       phba->fcf.mac_addr[3] =
+               bf_get(lpfc_fcf_record_mac_3, new_fcf_record);
+       phba->fcf.mac_addr[4] =
+               bf_get(lpfc_fcf_record_mac_4, new_fcf_record);
+       phba->fcf.mac_addr[5] =
+               bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
+       phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+       phba->fcf.priority = new_fcf_record->fip_priority;
+}
+
+/**
+ * lpfc_register_fcf - Register the FCF with hba.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a register fcfi mailbox command to register
+ * the fcf with HBA.
+ **/
+static void
+lpfc_register_fcf(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *fcf_mbxq;
+       int rc;
+       unsigned long flags;
+
+       spin_lock_irqsave(&phba->hbalock, flags);
+
+       /* If the FCF is not available, do nothing. */
+       if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               return;
+       }
+
+       /* The FCF is already registered, start discovery */
+       if (phba->fcf.fcf_flag & FCF_REGISTERED) {
+               phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               if (phba->pport->port_state != LPFC_FLOGI)
+                       lpfc_initial_flogi(phba->pport);
+               return;
+       }
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+
+       fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
+               GFP_KERNEL);
+       if (!fcf_mbxq)
+               return;
+
+       lpfc_reg_fcfi(phba, fcf_mbxq);
+       fcf_mbxq->vport = phba->pport;
+       fcf_mbxq->mbox_cmpl = lpfc_mbx_cmpl_reg_fcfi;
+       rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED)
+               mempool_free(fcf_mbxq, phba->mbox_mem_pool);
+
+       return;
+}
+
+/**
+ * lpfc_match_fcf_conn_list - Check if the FCF record can be used for discovery.
+ * @phba: pointer to lpfc hba data structure.
+ * @new_fcf_record: pointer to fcf record.
+ * @boot_flag: Indicates if this record used by boot bios.
+ * @addr_mode: The address mode to be used by this FCF
+ *
+ * This routine compares the fcf record with the connect list obtained from
+ * the config region to decide if this FCF can be used for SAN discovery. It
+ * returns 1 if this record can be used for SAN discovery, else it returns
+ * zero. If this FCF record can be used for SAN discovery, boot_flag will
+ * indicate whether this FCF is used by the boot bios and addr_mode will
+ * indicate the addressing mode to be used for this FCF when the function
+ * returns.
+ * If the FCF record needs to be used with a particular vlan id, the vlan is
+ * set in vlan_id on return from the function. If no VLAN tagging needs to
+ * be used with the FCF, vlan_id will be set to 0xFFFF.
+ **/
+static int
+lpfc_match_fcf_conn_list(struct lpfc_hba *phba,
+                       struct fcf_record *new_fcf_record,
+                       uint32_t *boot_flag, uint32_t *addr_mode,
+                       uint16_t *vlan_id)
+{
+       struct lpfc_fcf_conn_entry *conn_entry;
+
+       if (!phba->cfg_enable_fip) {
+               *boot_flag = 0;
+               *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+                               new_fcf_record);
+               if (phba->valid_vlan)
+                       *vlan_id = phba->vlan_id;
+               else
+                       *vlan_id = 0xFFFF;
+               return 1;
+       }
+
+       /*
+        * If there are no FCF connection table entries, the driver
+        * connects to all FCFs.
+        */
+       if (list_empty(&phba->fcf_conn_rec_list)) {
+               *boot_flag = 0;
+               *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+                       new_fcf_record);
+               *vlan_id = 0xFFFF;
+               return 1;
+       }
+
+       list_for_each_entry(conn_entry, &phba->fcf_conn_rec_list, list) {
+               if (!(conn_entry->conn_rec.flags & FCFCNCT_VALID))
+                       continue;
+
+               if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
+                       !lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
+                               new_fcf_record))
+                       continue;
+
+               if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
+                       /*
+                        * If the vlan bit map does not have the bit set for the
+                        * vlan id to be used, then it is not a match.
+                        */
+                       if (!(new_fcf_record->vlan_bitmap
+                               [conn_entry->conn_rec.vlan_tag / 8] &
+                               (1 << (conn_entry->conn_rec.vlan_tag % 8))))
+                               continue;
+               }
+
+               /*
+                * Check if the connection record specifies a required
+                * addressing mode.
+                */
+               if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+                       !(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)) {
+
+                       /*
+                        * If SPMA is required but the FCF does not support it, continue.
+                        */
+                       if ((conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+                               !(bf_get(lpfc_fcf_record_mac_addr_prov,
+                                       new_fcf_record) & LPFC_FCF_SPMA))
+                               continue;
+
+                       /*
+                        * If FPMA is required but the FCF does not support it, continue.
+                        */
+                       if (!(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+                               !(bf_get(lpfc_fcf_record_mac_addr_prov,
+                               new_fcf_record) & LPFC_FCF_FPMA))
+                               continue;
+               }
+
+               /*
+                * This fcf record matches filtering criteria.
+                */
+               if (conn_entry->conn_rec.flags & FCFCNCT_BOOT)
+                       *boot_flag = 1;
+               else
+                       *boot_flag = 0;
+
+               *addr_mode = bf_get(lpfc_fcf_record_mac_addr_prov,
+                               new_fcf_record);
+               /*
+                * If the user specified a required address mode, assign that
+                * address mode
+                */
+               if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+                       (!(conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED)))
+                       *addr_mode = (conn_entry->conn_rec.flags &
+                               FCFCNCT_AM_SPMA) ?
+                               LPFC_FCF_SPMA : LPFC_FCF_FPMA;
+               /*
+                * If the user specified a preferred address mode, use that
+                * addr mode only if the FCF supports it.
+                */
+               else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+                       (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+                       (conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+                       (*addr_mode & LPFC_FCF_SPMA))
+                               *addr_mode = LPFC_FCF_SPMA;
+               else if ((conn_entry->conn_rec.flags & FCFCNCT_AM_VALID) &&
+                       (conn_entry->conn_rec.flags & FCFCNCT_AM_PREFERRED) &&
+                       !(conn_entry->conn_rec.flags & FCFCNCT_AM_SPMA) &&
+                       (*addr_mode & LPFC_FCF_FPMA))
+                               *addr_mode = LPFC_FCF_FPMA;
+               /*
+                * If the user did not specify an addressing mode, use FPMA
+                * if possible, else use SPMA.
+                */
+               else if (*addr_mode & LPFC_FCF_FPMA)
+                       *addr_mode = LPFC_FCF_FPMA;
+
+               if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID)
+                       *vlan_id = conn_entry->conn_rec.vlan_tag;
+               else
+                       *vlan_id = 0xFFFF;
+
+               return 1;
+       }
+
+       return 0;
+}
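Editor's note: the vlan_bitmap test in the loop above packs one bit per VLAN id, eight ids per byte, so tag/8 selects the byte and tag%8 the bit. A worked standalone example with an illustrative tag (VLAN 101 lands in byte 12, bit 5):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint8_t vlan_bitmap[512] = {0};	/* 4096 VLAN ids, one bit each */
		unsigned tag = 101;		/* illustrative VLAN id */

		vlan_bitmap[tag / 8] |= 1 << (tag % 8);	/* set byte 12, bit 5 */

		/* The driver's test from lpfc_match_fcf_conn_list: */
		if (vlan_bitmap[tag / 8] & (1 << (tag % 8)))
			printf("VLAN %u is enabled for this FCF\n", tag);
		return 0;
	}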
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in the
+ * HBA and chooses the optimal FCF record for discovery. After finding
+ * the FCF for discovery it registers the FCF record and kicks off
+ * discovery.
+ * If the FCF_IN_USE flag is set in the currently used FCF, the routine
+ * tries to use an FCF record which matches the fabric name and mac
+ * address of the currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF
+ * record used by the BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       void *virt_addr;
+       dma_addr_t phys_addr;
+       uint8_t *bytep;
+       struct lpfc_mbx_sge sge;
+       struct lpfc_mbx_read_fcf_tbl *read_fcf;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+       struct fcf_record *new_fcf_record;
+       int rc;
+       uint32_t boot_flag, addr_mode;
+       uint32_t next_fcf_index;
+       unsigned long flags;
+       uint16_t vlan_id;
+
+       /* Get the first SGE entry from the non-embedded DMA memory. This
+        * routine only uses a single SGE.
+        */
+       lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+       phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+       if (unlikely(!mboxq->sge_array)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "2524 Failed to get the non-embedded SGE "
+                               "virtual address\n");
+               goto out;
+       }
+       virt_addr = mboxq->sge_array->addr[0];
+
+       shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                &shdr->response);
+       /*
+        * The FCF Record was read and there is no reason for the driver
+        * to maintain the FCF record data or memory. Instead, it only
+        * needs to keep track of which FCFIs can be used.
+        */
+       if (shdr_status || shdr_add_status) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2521 READ_FCF_RECORD mailbox failed "
+                               "with status x%x add_status x%x, mbx\n",
+                               shdr_status, shdr_add_status);
+               goto out;
+       }
+       /* Interpreting the returned information of FCF records */
+       read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+       lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
+                             sizeof(struct lpfc_mbx_read_fcf_tbl));
+       next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
+
+       new_fcf_record = (struct fcf_record *)(virt_addr +
+                         sizeof(struct lpfc_mbx_read_fcf_tbl));
+       lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
+                             sizeof(struct fcf_record));
+       bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+
+       rc = lpfc_match_fcf_conn_list(phba, new_fcf_record,
+                                     &boot_flag, &addr_mode,
+                                       &vlan_id);
+       /*
+        * If the fcf record does not match the connect list entries,
+        * read the next entry.
+        */
+       if (!rc)
+               goto read_next_fcf;
+       /*
+        * If this is not the first FCF discovery of the HBA, use the
+        * last FCF record for the discovery.
+        */
+       spin_lock_irqsave(&phba->hbalock, flags);
+       if (phba->fcf.fcf_flag & FCF_IN_USE) {
+               if (lpfc_fab_name_match(phba->fcf.fabric_name,
+                       new_fcf_record) &&
+                   lpfc_mac_addr_match(phba, new_fcf_record)) {
+                       phba->fcf.fcf_flag |= FCF_AVAILABLE;
+                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       goto out;
+               }
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               goto read_next_fcf;
+       }
+       if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
+               /*
+                * If the current FCF record does not have the boot flag
+                * set and the new fcf record has the boot flag set, use
+                * the new fcf record.
+                */
+               if (boot_flag && !(phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
+                       /* Use this FCF record */
+                       lpfc_copy_fcf_record(phba, new_fcf_record);
+                       phba->fcf.addr_mode = addr_mode;
+                       phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
+                       if (vlan_id != 0xFFFF) {
+                               phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+                               phba->fcf.vlan_id = vlan_id;
+                       }
+                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       goto read_next_fcf;
+               }
+               /*
+                * If the current FCF record has boot flag set and the
+                * new FCF record does not have boot flag, read the next
+                * FCF record.
+                */
+               if (!boot_flag && (phba->fcf.fcf_flag & FCF_BOOT_ENABLE)) {
+                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       goto read_next_fcf;
+               }
+               /*
+                * If there is a record with a lower priority value than
+                * the current FCF, use that record.
+                */
+               if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
+                       && (new_fcf_record->fip_priority <
+                               phba->fcf.priority)) {
+                       /* Use this FCF record */
+                       lpfc_copy_fcf_record(phba, new_fcf_record);
+                       phba->fcf.addr_mode = addr_mode;
+                       if (vlan_id != 0xFFFF) {
+                               phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+                               phba->fcf.vlan_id = vlan_id;
+                       }
+                       spin_unlock_irqrestore(&phba->hbalock, flags);
+                       goto read_next_fcf;
+               }
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+               goto read_next_fcf;
+       }
+       /*
+        * This is the first available FCF record, use this
+        * record.
+        */
+       lpfc_copy_fcf_record(phba, new_fcf_record);
+       phba->fcf.addr_mode = addr_mode;
+       if (boot_flag)
+               phba->fcf.fcf_flag |= FCF_BOOT_ENABLE;
+       phba->fcf.fcf_flag |= FCF_AVAILABLE;
+       if (vlan_id != 0xFFFF) {
+               phba->fcf.fcf_flag |= FCF_VALID_VLAN;
+               phba->fcf.vlan_id = vlan_id;
+       }
+       spin_unlock_irqrestore(&phba->hbalock, flags);
+       goto read_next_fcf;
+
+read_next_fcf:
+       lpfc_sli4_mbox_cmd_free(phba, mboxq);
+       if (next_fcf_index == LPFC_FCOE_FCF_NEXT_NONE || next_fcf_index == 0)
+               lpfc_register_fcf(phba);
+       else
+               lpfc_sli4_read_fcf_record(phba, next_fcf_index);
+       return;
+
+out:
+       lpfc_sli4_mbox_cmd_free(phba, mboxq);
+       lpfc_register_fcf(phba);
+
+       return;
+}
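Editor's note: taken together, the branches above give this selection precedence — keep a record matching the in-use FCF; otherwise prefer a boot-bios record; otherwise prefer a lower fip_priority; otherwise take the first available record. A compact sketch of that ordering (simplified fields, hypothetical helper; the in-use fabric-name/mac match is elided):

	struct fcf_cand { unsigned prio; int boot; };

	/* Sketch: should @cand replace the current selection @cur?
	 * Returns 1 for yes, mirroring the precedence in the handler above. */
	static int prefer_candidate(const struct fcf_cand *cur,
				    const struct fcf_cand *cand)
	{
		if (!cur)
			return 1;			/* first available record */
		if (cand->boot != cur->boot)
			return cand->boot;		/* boot-bios record wins */
		return cand->prio < cur->prio;		/* lower value = higher priority */
	}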
+
+/**
+ * lpfc_start_fdiscs - send fdiscs for each vport on this port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function loops through the list of vports on the @phba and issues an
+ * FDISC if possible.
+ */
+void
+lpfc_start_fdiscs(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       int i;
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
+                               continue;
+                       /* There is no vpi for this vport */
+                       if (vports[i]->vpi > phba->max_vpi) {
+                               lpfc_vport_set_state(vports[i],
+                                                    FC_VPORT_FAILED);
+                               continue;
+                       }
+                       if (phba->fc_topology == TOPOLOGY_LOOP) {
+                               lpfc_vport_set_state(vports[i],
+                                                    FC_VPORT_LINKDOWN);
+                               continue;
+                       }
+                       if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+                               lpfc_initial_fdisc(vports[i]);
+                       else {
+                               lpfc_vport_set_state(vports[i],
+                                                    FC_VPORT_NO_FABRIC_SUPP);
+                               lpfc_printf_vlog(vports[i], KERN_ERR,
+                                                LOG_ELS,
+                                                "0259 No NPIV "
+                                                "Fabric support\n");
+                       }
+               }
+       }
+       lpfc_destroy_vport_work_array(phba, vports);
+}
+
+void
+lpfc_mbx_cmpl_reg_vfi(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       struct lpfc_dmabuf *dmabuf = mboxq->context1;
+       struct lpfc_vport *vport = mboxq->vport;
+
+       if (mboxq->u.mb.mbxStatus) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                        "2018 REG_VFI mbxStatus error x%x "
+                        "HBA state x%x\n",
+                        mboxq->u.mb.mbxStatus, vport->port_state);
+               if (phba->fc_topology == TOPOLOGY_LOOP) {
+                       /* FLOGI failed, use loop map to make discovery list */
+                       lpfc_disc_list_loopmap(vport);
+                       /* Start discovery */
+                       lpfc_disc_start(vport);
+                       goto fail_free_mem;
+               }
+               lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+               goto fail_free_mem;
+       }
+       /* Mark the vport as registered with its VFI */
+       vport->vfi_state |= LPFC_VFI_REGISTERED;
+
+       if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
+               lpfc_start_fdiscs(phba);
+               lpfc_do_scr_ns_plogi(phba, vport);
+       }
+
+fail_free_mem:
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
+       kfree(dmabuf);
+       return;
+}
+
 static void
 lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) pmb->context1;
        struct lpfc_vport  *vport = pmb->vport;
 
@@ -1012,13 +1625,13 @@ static void
 lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
 {
        struct lpfc_vport *vport = phba->pport;
-       LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox;
+       LPFC_MBOXQ_t *sparam_mbox, *cfglink_mbox = NULL;
        int i;
        struct lpfc_dmabuf *mp;
        int rc;
+       struct fcf_record *fcf_record;
 
        sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-       cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 
        spin_lock_irq(&phba->hbalock);
        switch (la->UlnkSpeed) {
@@ -1034,6 +1647,9 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
        case LA_8GHZ_LINK:
                phba->fc_linkspeed = LA_8GHZ_LINK;
                break;
+       case LA_10GHZ_LINK:
+               phba->fc_linkspeed = LA_10GHZ_LINK;
+               break;
        default:
                phba->fc_linkspeed = LA_UNKNW_LINK;
                break;
@@ -1115,22 +1731,66 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, READ_LA_VAR *la)
                        lpfc_mbuf_free(phba, mp->virt, mp->phys);
                        kfree(mp);
                        mempool_free(sparam_mbox, phba->mbox_mem_pool);
-                       if (cfglink_mbox)
-                               mempool_free(cfglink_mbox, phba->mbox_mem_pool);
                        goto out;
                }
        }
 
-       if (cfglink_mbox) {
+       if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
+               cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+               if (!cfglink_mbox)
+                       goto out;
                vport->port_state = LPFC_LOCAL_CFG_LINK;
                lpfc_config_link(phba, cfglink_mbox);
                cfglink_mbox->vport = vport;
                cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link;
                rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT);
-               if (rc != MBX_NOT_FINISHED)
-                       return;
-               mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+               if (rc == MBX_NOT_FINISHED) {
+                       mempool_free(cfglink_mbox, phba->mbox_mem_pool);
+                       goto out;
+               }
+       } else {
+               /*
+                * Add the driver's default FCF record at FCF index 0 now. This
+                * is a phase 1 implementation that supports only FCF index 0
+                * and driver defaults.
+                */
+               if (phba->cfg_enable_fip == 0) {
+                       fcf_record = kzalloc(sizeof(struct fcf_record),
+                                       GFP_KERNEL);
+                       if (unlikely(!fcf_record)) {
+                               lpfc_printf_log(phba, KERN_ERR,
+                                       LOG_MBOX | LOG_SLI,
+                                       "2554 Could not allocate memmory for "
+                                       "fcf record\n");
+                               rc = -ENODEV;
+                               goto out;
+                       }
+
+                       lpfc_sli4_build_dflt_fcf_record(phba, fcf_record,
+                                               LPFC_FCOE_FCF_DEF_INDEX);
+                       rc = lpfc_sli4_add_fcf_record(phba, fcf_record);
+                       if (unlikely(rc)) {
+                               lpfc_printf_log(phba, KERN_ERR,
+                                       LOG_MBOX | LOG_SLI,
+                                       "2013 Could not manually add FCF "
+                                       "record 0, status %d\n", rc);
+                               rc = -ENODEV;
+                               kfree(fcf_record);
+                               goto out;
+                       }
+                       kfree(fcf_record);
+               }
+               /*
+                * The driver is expected to do FIP/FCF. Call the port
+                * and get the FCF Table.
+                */
+               rc = lpfc_sli4_read_fcf_record(phba,
+                                       LPFC_FCOE_FCF_GET_FIRST);
+               if (rc)
+                       goto out;
        }
+
+       return;
 out:
        lpfc_vport_set_state(vport, FC_VPORT_FAILED);
        lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
@@ -1147,10 +1807,12 @@ lpfc_enable_la(struct lpfc_hba *phba)
        struct lpfc_sli *psli = &phba->sli;
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag |= LPFC_PROCESS_LA;
-       control = readl(phba->HCregaddr);
-       control |= HC_LAINT_ENA;
-       writel(control, phba->HCregaddr);
-       readl(phba->HCregaddr); /* flush */
+       if (phba->sli_rev <= LPFC_SLI_REV3) {
+               control = readl(phba->HCregaddr);
+               control |= HC_LAINT_ENA;
+               writel(control, phba->HCregaddr);
+               readl(phba->HCregaddr); /* flush */
+       }
        spin_unlock_irq(&phba->hbalock);
 }
 
@@ -1159,6 +1821,7 @@ lpfc_mbx_issue_link_down(struct lpfc_hba *phba)
 {
        lpfc_linkdown(phba);
        lpfc_enable_la(phba);
+       lpfc_unregister_unused_fcf(phba);
        /* turn on Link Attention interrupts - no CLEAR_LA needed */
 }
 
@@ -1175,7 +1838,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
        READ_LA_VAR *la;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
 
        /* Unblock ELS traffic */
@@ -1190,7 +1853,7 @@ lpfc_mbx_cmpl_read_la(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                goto lpfc_mbx_cmpl_read_la_free_mbuf;
        }
 
-       la = (READ_LA_VAR *) & pmb->mb.un.varReadLA;
+       la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
 
        memcpy(&phba->alpa_map[0], mp->virt, 128);
 
@@ -1328,7 +1991,7 @@ lpfc_mbx_cmpl_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 static void
 lpfc_mbx_cmpl_unreg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
 
@@ -1381,7 +2044,7 @@ lpfc_mbx_cmpl_reg_vpi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
        struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
 
        switch (mb->mbxStatus) {
        case 0x0011:
@@ -1416,6 +2079,128 @@ out:
        return;
 }
 
+/**
+ * lpfc_create_static_vport - Read HBA config region to create static vports.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine issues a DUMP mailbox command for config region 22 to get
+ * the list of static vports to be created. The function creates vports
+ * based on the information returned from the HBA.
+ **/
+void
+lpfc_create_static_vport(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *pmb = NULL;
+       MAILBOX_t *mb;
+       struct static_vport_info *vport_info;
+       int rc, i;
+       struct fc_vport_identifiers vport_id;
+       struct fc_vport *new_fc_vport;
+       struct Scsi_Host *shost;
+       struct lpfc_vport *vport;
+       uint16_t offset = 0;
+       uint8_t *vport_buff;
+
+       pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0542 lpfc_create_static_vport failed to"
+                               " allocate mailbox memory\n");
+               return;
+       }
+
+       mb = &pmb->u.mb;
+
+       vport_info = kzalloc(sizeof(struct static_vport_info), GFP_KERNEL);
+       if (!vport_info) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0543 lpfc_create_static_vport failed to"
+                               " allocate vport_info\n");
+               mempool_free(pmb, phba->mbox_mem_pool);
+               return;
+       }
+
+       vport_buff = (uint8_t *) vport_info;
+       do {
+               lpfc_dump_static_vport(phba, pmb, offset);
+               pmb->vport = phba->pport;
+               rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
+
+               if ((rc != MBX_SUCCESS) || mb->mbxStatus) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0544 lpfc_create_static_vport failed to"
+                               " issue dump mailbox command ret 0x%x "
+                               "status 0x%x\n",
+                               rc, mb->mbxStatus);
+                       goto out;
+               }
+
+               if (mb->un.varDmp.word_cnt >
+                       sizeof(struct static_vport_info) - offset)
+                       mb->un.varDmp.word_cnt =
+                       sizeof(struct static_vport_info) - offset;
+
+               lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+                       vport_buff + offset,
+                       mb->un.varDmp.word_cnt);
+               offset += mb->un.varDmp.word_cnt;
+
+       } while (mb->un.varDmp.word_cnt &&
+               offset < sizeof(struct static_vport_info));
+
+       if ((le32_to_cpu(vport_info->signature) != VPORT_INFO_SIG) ||
+               ((le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK)
+                       != VPORT_INFO_REV)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "0545 lpfc_create_static_vport bad"
+                       " information header 0x%x 0x%x\n",
+                       le32_to_cpu(vport_info->signature),
+                       le32_to_cpu(vport_info->rev) & VPORT_INFO_REV_MASK);
+
+               goto out;
+       }
+
+       shost = lpfc_shost_from_vport(phba->pport);
+
+       for (i = 0; i < MAX_STATIC_VPORT_COUNT; i++) {
+               memset(&vport_id, 0, sizeof(vport_id));
+               vport_id.port_name = wwn_to_u64(vport_info->vport_list[i].wwpn);
+               vport_id.node_name = wwn_to_u64(vport_info->vport_list[i].wwnn);
+               if (!vport_id.port_name || !vport_id.node_name)
+                       continue;
+
+               vport_id.roles = FC_PORT_ROLE_FCP_INITIATOR;
+               vport_id.vport_type = FC_PORTTYPE_NPIV;
+               vport_id.disable = false;
+               new_fc_vport = fc_vport_create(shost, 0, &vport_id);
+
+               if (!new_fc_vport) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0546 lpfc_create_static_vport failed to"
+                               " create vport \n");
+                       continue;
+               }
+
+               vport = *(struct lpfc_vport **)new_fc_vport->dd_data;
+               vport->vport_flag |= STATIC_VPORT;
+       }
+
+out:
+       /*
+        * If the command timed out, setting context2 to NULL tells the
+        * SLI layer not to use this buffer.
+        */
+       spin_lock_irq(&phba->hbalock);
+       pmb->context2 = NULL;
+       spin_unlock_irq(&phba->hbalock);
+       kfree(vport_info);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(pmb, phba->mbox_mem_pool);
+
+       return;
+}
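
The DUMP loop above follows a chunked-read pattern; in pseudocode (a
paraphrase of the code, not part of the patch):

        offset = 0;
        do {
                issue a DUMP mailbox for config region 22 at offset;
                /* clamp the returned count to the space left in the buffer */
                cnt = min(mb->un.varDmp.word_cnt,
                          sizeof(struct static_vport_info) - offset);
                copy cnt bytes of response data to vport_buff + offset;
                offset += cnt;
        } while (cnt && offset < sizeof(struct static_vport_info));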
+
 /*
  * This routine handles processing a Fabric REG_LOGIN mailbox
  * command upon completion. It is setup in the LPFC_MBOXQ
@@ -1426,16 +2211,17 @@ void
 lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_vport *vport = pmb->vport;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp;
-       struct lpfc_vport **vports;
-       int i;
 
        ndlp = (struct lpfc_nodelist *) pmb->context2;
        pmb->context1 = NULL;
        pmb->context2 = NULL;
        if (mb->mbxStatus) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
+                                "0258 Register Fabric login error: 0x%x\n",
+                                mb->mbxStatus);
                lpfc_mbuf_free(phba, mp->virt, mp->phys);
                kfree(mp);
                mempool_free(pmb, phba->mbox_mem_pool);
@@ -1454,9 +2240,6 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                }
 
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_MBOX,
-                                "0258 Register Fabric login error: 0x%x\n",
-                                mb->mbxStatus);
                /* Decrement the reference count to ndlp after the reference
                 * to the ndlp are done.
                 */
@@ -1465,34 +2248,12 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        }
 
        ndlp->nlp_rpi = mb->un.varWords[0];
+       ndlp->nlp_flag |= NLP_RPI_VALID;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
        if (vport->port_state == LPFC_FABRIC_CFG_LINK) {
-               vports = lpfc_create_vport_work_array(phba);
-               if (vports != NULL)
-                       for(i = 0;
-                           i <= phba->max_vpi && vports[i] != NULL;
-                           i++) {
-                               if (vports[i]->port_type == LPFC_PHYSICAL_PORT)
-                                       continue;
-                               if (phba->fc_topology == TOPOLOGY_LOOP) {
-                                       lpfc_vport_set_state(vports[i],
-                                                       FC_VPORT_LINKDOWN);
-                                       continue;
-                               }
-                               if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
-                                       lpfc_initial_fdisc(vports[i]);
-                               else {
-                                       lpfc_vport_set_state(vports[i],
-                                               FC_VPORT_NO_FABRIC_SUPP);
-                                       lpfc_printf_vlog(vport, KERN_ERR,
-                                                        LOG_ELS,
-                                                       "0259 No NPIV "
-                                                       "Fabric support\n");
-                               }
-                       }
-               lpfc_destroy_vport_work_array(phba, vports);
+               lpfc_start_fdiscs(phba);
                lpfc_do_scr_ns_plogi(phba, vport);
        }
 
@@ -1516,13 +2277,16 @@ lpfc_mbx_cmpl_fabric_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 void
 lpfc_mbx_cmpl_ns_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport *vport = pmb->vport;
 
        if (mb->mbxStatus) {
 out:
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+                                "0260 Register NameServer error: 0x%x\n",
+                                mb->mbxStatus);
                /* decrement the node reference count held for this
                 * callback function.
                 */
@@ -1546,15 +2310,13 @@ out:
                        return;
                }
                lpfc_vport_set_state(vport, FC_VPORT_FAILED);
-               lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
-                                "0260 Register NameServer error: 0x%x\n",
-                                mb->mbxStatus);
                return;
        }
 
        pmb->context1 = NULL;
 
        ndlp->nlp_rpi = mb->un.varWords[0];
+       ndlp->nlp_flag |= NLP_RPI_VALID;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -2055,7 +2817,7 @@ lpfc_check_sli_ndlp(struct lpfc_hba *phba,
        if (pring->ringno == LPFC_ELS_RING) {
                switch (icmd->ulpCommand) {
                case CMD_GEN_REQUEST64_CR:
-                       if (icmd->ulpContext == (volatile ushort)ndlp->nlp_rpi)
+                       if (iocb->context_un.ndlp == ndlp)
                                return 1;
                case CMD_ELS_REQUEST64_CR:
                        if (icmd->un.elsreq64.remoteID == ndlp->nlp_DID)
@@ -2102,7 +2864,7 @@ lpfc_no_rpi(struct lpfc_hba *phba, struct lpfc_nodelist *ndlp)
         */
        psli = &phba->sli;
        rpi = ndlp->nlp_rpi;
-       if (rpi) {
+       if (ndlp->nlp_flag & NLP_RPI_VALID) {
                /* Now process each ring */
                for (i = 0; i < psli->num_rings; i++) {
                        pring = &psli->ring[i];
@@ -2150,7 +2912,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        LPFC_MBOXQ_t    *mbox;
        int rc;
 
-       if (ndlp->nlp_rpi) {
+       if (ndlp->nlp_flag & NLP_RPI_VALID) {
                mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
                if (mbox) {
                        lpfc_unreg_login(phba, vport->vpi, ndlp->nlp_rpi, mbox);
@@ -2162,6 +2924,7 @@ lpfc_unreg_rpi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
                }
                lpfc_no_rpi(phba, ndlp);
                ndlp->nlp_rpi = 0;
+               ndlp->nlp_flag &= ~NLP_RPI_VALID;
                return 1;
        }
        return 0;
@@ -2252,7 +3015,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
-               if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+               if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mb->context2 = NULL;
                        mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -2261,7 +3024,7 @@ lpfc_cleanup_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
-               if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+               if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
@@ -2309,13 +3072,14 @@ lpfc_nlp_remove(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
        int rc;
 
        lpfc_cancel_retry_delay_tmo(vport, ndlp);
-       if (ndlp->nlp_flag & NLP_DEFER_RM && !ndlp->nlp_rpi) {
+       if ((ndlp->nlp_flag & NLP_DEFER_RM) &&
+           !(ndlp->nlp_flag & NLP_RPI_VALID)) {
                /* For this case we need to cleanup the default rpi
                 * allocated by the firmware.
                 */
                if ((mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL))
                        != NULL) {
-                       rc = lpfc_reg_login(phba, vport->vpi, ndlp->nlp_DID,
+                       rc = lpfc_reg_rpi(phba, vport->vpi, ndlp->nlp_DID,
                            (uint8_t *) &vport->fc_sparam, mbox, 0);
                        if (rc) {
                                mempool_free(mbox, phba->mbox_mem_pool);
@@ -2553,7 +3317,8 @@ lpfc_issue_clear_la(struct lpfc_hba *phba, struct lpfc_vport *vport)
         * clear_la then don't send it.
         */
        if ((phba->link_state >= LPFC_CLEAR_LA) ||
-           (vport->port_type != LPFC_PHYSICAL_PORT))
+           (vport->port_type != LPFC_PHYSICAL_PORT) ||
+               (phba->sli_rev == LPFC_SLI_REV4))
                return;
 
                        /* Link up discovery */
@@ -2582,7 +3347,7 @@ lpfc_issue_reg_vpi(struct lpfc_hba *phba, struct lpfc_vport *vport)
 
        regvpimbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (regvpimbox) {
-               lpfc_reg_vpi(phba, vport->vpi, vport->fc_myDID, regvpimbox);
+               lpfc_reg_vpi(vport, regvpimbox);
                regvpimbox->mbox_cmpl = lpfc_mbx_cmpl_reg_vpi;
                regvpimbox->vport = vport;
                if (lpfc_sli_issue_mbox(phba, regvpimbox, MBX_NOWAIT)
@@ -2642,7 +3407,8 @@ lpfc_disc_start(struct lpfc_vport *vport)
         */
        if ((phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) &&
            !(vport->fc_flag & FC_PT2PT) &&
-           !(vport->fc_flag & FC_RSCN_MODE)) {
+           !(vport->fc_flag & FC_RSCN_MODE) &&
+           (phba->sli_rev < LPFC_SLI_REV4)) {
                lpfc_issue_reg_vpi(phba, vport);
                return;
        }
@@ -2919,11 +3685,13 @@ restart_disc:
                 * set port_state to PORT_READY if SLI2.
                 * cmpl_reg_vpi will set port_state to READY for SLI3.
                 */
-               if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
-                       lpfc_issue_reg_vpi(phba, vport);
-               else  { /* NPIV Not enabled */
-                       lpfc_issue_clear_la(phba, vport);
-                       vport->port_state = LPFC_VPORT_READY;
+               if (phba->sli_rev < LPFC_SLI_REV4) {
+                       if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+                               lpfc_issue_reg_vpi(phba, vport);
+                       else  { /* NPIV Not enabled */
+                               lpfc_issue_clear_la(phba, vport);
+                               vport->port_state = LPFC_VPORT_READY;
+                       }
                }
 
                /* Setup and issue mailbox INITIALIZE LINK command */
@@ -2939,7 +3707,7 @@ restart_disc:
                lpfc_linkdown(phba);
                lpfc_init_link(phba, initlinkmbox, phba->cfg_topology,
                               phba->cfg_link_speed);
-               initlinkmbox->mb.un.varInitLnk.lipsr_AL_PA = 0;
+               initlinkmbox->u.mb.un.varInitLnk.lipsr_AL_PA = 0;
                initlinkmbox->vport = vport;
                initlinkmbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, initlinkmbox, MBX_NOWAIT);
@@ -2959,11 +3727,13 @@ restart_disc:
                 * set port_state to PORT_READY if SLI2.
                 * cmpl_reg_vpi will set port_state to READY for SLI3.
                 */
-               if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
-                       lpfc_issue_reg_vpi(phba, vport);
-               else {  /* NPIV Not enabled */
-                       lpfc_issue_clear_la(phba, vport);
-                       vport->port_state = LPFC_VPORT_READY;
+               if (phba->sli_rev < LPFC_SLI_REV4) {
+                       if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
+                               lpfc_issue_reg_vpi(phba, vport);
+                       else  { /* NPIV Not enabled */
+                               lpfc_issue_clear_la(phba, vport);
+                               vport->port_state = LPFC_VPORT_READY;
+                       }
                }
                break;
 
@@ -3036,7 +3806,7 @@ restart_disc:
 void
 lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_dmabuf   *mp = (struct lpfc_dmabuf *) (pmb->context1);
        struct lpfc_nodelist *ndlp = (struct lpfc_nodelist *) pmb->context2;
        struct lpfc_vport    *vport = pmb->vport;
@@ -3044,6 +3814,7 @@ lpfc_mbx_cmpl_fdmi_reg_login(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        pmb->context1 = NULL;
 
        ndlp->nlp_rpi = mb->un.varWords[0];
+       ndlp->nlp_flag |= NLP_RPI_VALID;
        ndlp->nlp_type |= NLP_FABRIC;
        lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
 
@@ -3297,3 +4068,395 @@ lpfc_nlp_not_used(struct lpfc_nodelist *ndlp)
                        return 1;
        return 0;
 }
+
+/**
+ * lpfc_fcf_inuse - Check if FCF can be unregistered.
+ * @phba: Pointer to hba context object.
+ *
+ * This function iterates through the FC nodes associated
+ * with all vports to check whether any node has an
+ * fc_rport associated with it. If an fc_rport is
+ * associated with a node, then that node is either in the
+ * discovered state or its devloss_timer is pending.
+ */
+static int
+lpfc_fcf_inuse(struct lpfc_hba *phba)
+{
+       struct lpfc_vport **vports;
+       int i, ret = 0;
+       struct lpfc_nodelist *ndlp;
+       struct Scsi_Host  *shost;
+
+       vports = lpfc_create_vport_work_array(phba);
+
+       for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+               shost = lpfc_shost_from_vport(vports[i]);
+               spin_lock_irq(shost->host_lock);
+               list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
+                       if (NLP_CHK_NODE_ACT(ndlp) && ndlp->rport &&
+                         (ndlp->rport->roles & FC_RPORT_ROLE_FCP_TARGET)) {
+                               ret = 1;
+                               spin_unlock_irq(shost->host_lock);
+                               goto out;
+                       }
+               }
+               spin_unlock_irq(shost->host_lock);
+       }
+out:
+       lpfc_destroy_vport_work_array(phba, vports);
+       return ret;
+}
+
+/**
+ * lpfc_unregister_vfi_cmpl - Completion handler for unreg vfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       struct lpfc_vport *vport = mboxq->vport;
+
+       if (mboxq->u.mb.mbxStatus) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2555 UNREG_VFI mbxStatus error x%x "
+                       "HBA state x%x\n",
+                       mboxq->u.mb.mbxStatus, vport->port_state);
+       }
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return;
+}
+
+/**
+ * lpfc_unregister_fcfi_cmpl - Completion handler for unreg fcfi.
+ * @phba: Pointer to hba context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This function frees memory associated with the mailbox command.
+ */
+static void
+lpfc_unregister_fcfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       struct lpfc_vport *vport = mboxq->vport;
+
+       if (mboxq->u.mb.mbxStatus) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2550 UNREG_FCFI mbxStatus error x%x "
+                       "HBA state x%x\n",
+                       mboxq->u.mb.mbxStatus, vport->port_state);
+       }
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return;
+}
+
+/**
+ * lpfc_unregister_unused_fcf - Unregister FCF if all devices are disconnected.
+ * @phba: Pointer to hba context object.
+ *
+ * This function checks whether any remote ports are still connected through
+ * the FCF; if all devices are disconnected, it unregisters the FCFI.
+ * It also tries to use another FCF for discovery.
+ */
+void
+lpfc_unregister_unused_fcf(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+       struct lpfc_vport **vports;
+       int i;
+
+       spin_lock_irq(&phba->hbalock);
+       /*
+        * Do nothing if the HBA does not support FCoE, has no FCF
+        * registered, or is not running in FIP mode.
+        */
+       if (!(phba->hba_flag & HBA_FCOE_SUPPORT) ||
+               !(phba->fcf.fcf_flag & FCF_REGISTERED) ||
+               (phba->cfg_enable_fip == 0)) {
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+       spin_unlock_irq(&phba->hbalock);
+
+       if (lpfc_fcf_inuse(phba))
+               return;
+
+       /* Unregister VPIs */
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports &&
+               (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED))
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
+                       lpfc_mbx_unreg_vpi(vports[i]);
+                       vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+                       vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+
+       /* Unregister VFI */
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2556 UNREG_VFI mbox allocation failed"
+                       "HBA state x%x\n",
+                       phba->pport->port_state);
+               return;
+       }
+
+       lpfc_unreg_vfi(mbox, phba->pport->vfi);
+       mbox->vport = phba->pport;
+       mbox->mbox_cmpl = lpfc_unregister_vfi_cmpl;
+
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2557 UNREG_VFI issue mbox failed rc x%x "
+                       "HBA state x%x\n",
+                       rc, phba->pport->port_state);
+               mempool_free(mbox, phba->mbox_mem_pool);
+               return;
+       }
+
+       /* Unregister FCF */
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2551 UNREG_FCFI mbox allocation failed"
+                       "HBA state x%x\n",
+                       phba->pport->port_state);
+               return;
+       }
+
+       lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+       mbox->vport = phba->pport;
+       mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2552 UNREG_FCFI issue mbox failed rc x%x "
+                       "HBA state x%x\n",
+                       rc, phba->pport->port_state);
+               mempool_free(mbox, phba->mbox_mem_pool);
+               return;
+       }
+
+       spin_lock_irq(&phba->hbalock);
+       phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_REGISTERED |
+               FCF_DISCOVERED | FCF_BOOT_ENABLE | FCF_IN_USE |
+               FCF_VALID_VLAN);
+       spin_unlock_irq(&phba->hbalock);
+
+       /*
+        * If driver is not unloading, check if there is any other
+        * FCF record that can be used for discovery.
+        */
+       if ((phba->pport->load_flag & FC_UNLOADING) ||
+               (phba->link_state < LPFC_LINK_UP))
+               return;
+
+       rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+       if (rc)
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+                       "2553 lpfc_unregister_unused_fcf failed to read FCF"
+                       " record HBA state x%x\n",
+                       phba->pport->port_state);
+}
+
+/**
+ * lpfc_read_fcf_conn_tbl - Create driver FCF connection table.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCF connection table as stored in the
+ *        config region.
+ *
+ * This function creates the driver data structure for the FCF connection
+ * record table read from config region 23.
+ */
+static void
+lpfc_read_fcf_conn_tbl(struct lpfc_hba *phba,
+       uint8_t *buff)
+{
+       struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
+       struct lpfc_fcf_conn_hdr *conn_hdr;
+       struct lpfc_fcf_conn_rec *conn_rec;
+       uint32_t record_count;
+       int i;
+
+       /* Free the current connect table */
+       list_for_each_entry_safe(conn_entry, next_conn_entry,
+               &phba->fcf_conn_rec_list, list)
+               kfree(conn_entry);
+
+       conn_hdr = (struct lpfc_fcf_conn_hdr *) buff;
+       record_count = conn_hdr->length * sizeof(uint32_t)/
+               sizeof(struct lpfc_fcf_conn_rec);
+
+       conn_rec = (struct lpfc_fcf_conn_rec *)
+               (buff + sizeof(struct lpfc_fcf_conn_hdr));
+
+       for (i = 0; i < record_count; i++) {
+               if (!(conn_rec[i].flags & FCFCNCT_VALID))
+                       continue;
+               conn_entry = kzalloc(sizeof(struct lpfc_fcf_conn_entry),
+                       GFP_KERNEL);
+               if (!conn_entry) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2566 Failed to allocate connection"
+                               " table entry\n");
+                       return;
+               }
+
+               memcpy(&conn_entry->conn_rec, &conn_rec[i],
+                       sizeof(struct lpfc_fcf_conn_rec));
+               conn_entry->conn_rec.vlan_tag =
+                       le16_to_cpu(conn_entry->conn_rec.vlan_tag) & 0xFFF;
+               conn_entry->conn_rec.flags =
+                       le16_to_cpu(conn_entry->conn_rec.flags);
+               list_add_tail(&conn_entry->list,
+                       &phba->fcf_conn_rec_list);
+       }
+}
+
+/**
+ * lpfc_read_fcoe_param - Read FCoE parameters from config region 23.
+ * @phba: Pointer to hba context object.
+ * @buff: Buffer containing the FCoE parameter data structure.
+ *
+ * This function updates the driver data structure with config
+ * parameters read from config region 23.
+ */
+static void
+lpfc_read_fcoe_param(struct lpfc_hba *phba,
+                       uint8_t *buff)
+{
+       struct lpfc_fip_param_hdr *fcoe_param_hdr;
+       struct lpfc_fcoe_params *fcoe_param;
+
+       fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
+               buff;
+       /* parenthesized so the pointer advances by header bytes */
+       fcoe_param = (struct lpfc_fcoe_params *)
+               (buff + sizeof(struct lpfc_fip_param_hdr));
+
+       if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
+               (fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
+               return;
+
+       if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
+                       FIPP_MODE_ON)
+               phba->cfg_enable_fip = 1;
+
+       if (bf_get(lpfc_fip_param_hdr_fipp_mode, fcoe_param_hdr) ==
+               FIPP_MODE_OFF)
+               phba->cfg_enable_fip = 0;
+
+       if (fcoe_param_hdr->parm_flags & FIPP_VLAN_VALID) {
+               phba->valid_vlan = 1;
+               phba->vlan_id = le16_to_cpu(fcoe_param->vlan_tag) &
+                       0xFFF;
+       }
+
+       phba->fc_map[0] = fcoe_param->fc_map[0];
+       phba->fc_map[1] = fcoe_param->fc_map[1];
+       phba->fc_map[2] = fcoe_param->fc_map[2];
+       return;
+}
+
+/**
+ * lpfc_get_rec_conf23 - Get a record type in config region data.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ * @rec_type: Record type to be searched.
+ *
+ * This function searches config region data to find the beginning
+ * of the record specified by @rec_type. If the record is found, this
+ * function returns a pointer to it; otherwise it returns NULL.
+ */
+static uint8_t *
+lpfc_get_rec_conf23(uint8_t *buff, uint32_t size, uint8_t rec_type)
+{
+       uint32_t offset = 0, rec_length;
+
+       if ((buff[0] == LPFC_REGION23_LAST_REC) ||
+               (size < sizeof(uint32_t)))
+               return NULL;
+
+       rec_length = buff[offset + 1];
+
+       /*
+        * Each TLV record has a one-word header followed by the number
+        * of data words given in the rec_length field of that header.
+        */
+       while ((offset + rec_length * sizeof(uint32_t) + sizeof(uint32_t))
+               <= size) {
+               if (buff[offset] == rec_type)
+                       return &buff[offset];
+
+               if (buff[offset] == LPFC_REGION23_LAST_REC)
+                       return NULL;
+
+               offset += rec_length * sizeof(uint32_t) + sizeof(uint32_t);
+               rec_length = buff[offset + 1];
+       }
+       return NULL;
+}
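
The arithmetic in lpfc_get_rec_conf23() implies this region-23 TLV layout
(inferred from the code above, not quoted from a spec):

        byte:    0        1           2..3
              +--------+---------+----------+---------------------+
              |  type  | length  | reserved | length * 4 bytes of |
              |        | (words) |          | record data         |
              +--------+---------+----------+---------------------+

Each record therefore occupies (length * 4) + 4 bytes, and a type byte of
LPFC_REGION23_LAST_REC terminates the list.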
+
+/**
+ * lpfc_parse_fcoe_conf - Parse FCoE config data read from config region 23.
+ * @phba: Pointer to lpfc_hba data structure.
+ * @buff: Buffer containing config region 23 data.
+ * @size: Size of the data buffer.
+ *
+ * This function parses the FCoE config parameters in config region 23 and
+ * populates the driver data structures with them.
+ */
+void
+lpfc_parse_fcoe_conf(struct lpfc_hba *phba,
+               uint8_t *buff,
+               uint32_t size)
+{
+       uint32_t offset = 0, rec_length;
+       uint8_t *rec_ptr;
+
+       /*
+        * If the data size is less than 2 words, the signature and
+        * version cannot be verified.
+        */
+       if (size < 2*sizeof(uint32_t))
+               return;
+
+       /* Check the region signature first */
+       if (memcmp(buff, LPFC_REGION23_SIGNATURE, 4)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2567 Config region 23 has bad signature\n");
+               return;
+       }
+
+       offset += 4;
+
+       /* Check the data structure version */
+       if (buff[offset] != LPFC_REGION23_VERSION) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2568 Config region 23 has bad version\n");
+               return;
+       }
+       offset += 4;
+
+       rec_length = buff[offset + 1];
+
+       /* Read FCoE param record */
+       rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+                       size - offset, FCOE_PARAM_TYPE);
+       if (rec_ptr)
+               lpfc_read_fcoe_param(phba, rec_ptr);
+
+       /* Read FCF connection table */
+       rec_ptr = lpfc_get_rec_conf23(&buff[offset],
+               size - offset, FCOE_CONN_TBL_TYPE);
+       if (rec_ptr)
+               lpfc_read_fcf_conn_tbl(phba, rec_ptr);
+
+}
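
A hypothetical caller, for illustration only (read_config_region() is made
up; only lpfc_parse_fcoe_conf() is from the patch):

        uint32_t size;
        uint8_t *rgn23 = read_config_region(phba, 23, &size);

        if (rgn23) {
                lpfc_parse_fcoe_conf(phba, rgn23, size);
                kfree(rgn23);
        }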
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 4168c7b498b87d6e3b79759eb5887931dbfe5f5f..02aa016b93e926c2f6808f2e83d353e10359df55 100644
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
@@ -470,6 +470,35 @@ struct serv_parm { /* Structure is in Big Endian format */
        uint8_t vendorVersion[16];
 };
 
+/*
+ * Virtual Fabric Tagging Header
+ */
+struct fc_vft_header {
+        uint32_t word0;
+#define fc_vft_hdr_r_ctl_SHIFT         24
+#define fc_vft_hdr_r_ctl_MASK          0xFF
+#define fc_vft_hdr_r_ctl_WORD          word0
+#define fc_vft_hdr_ver_SHIFT           22
+#define fc_vft_hdr_ver_MASK            0x3
+#define fc_vft_hdr_ver_WORD            word0
+#define fc_vft_hdr_type_SHIFT          18
+#define fc_vft_hdr_type_MASK           0xF
+#define fc_vft_hdr_type_WORD           word0
+#define fc_vft_hdr_e_SHIFT             16
+#define fc_vft_hdr_e_MASK              0x1
+#define fc_vft_hdr_e_WORD              word0
+#define fc_vft_hdr_priority_SHIFT      13
+#define fc_vft_hdr_priority_MASK       0x7
+#define fc_vft_hdr_priority_WORD       word0
+#define fc_vft_hdr_vf_id_SHIFT         1
+#define fc_vft_hdr_vf_id_MASK          0xFFF
+#define fc_vft_hdr_vf_id_WORD          word0
+       uint32_t word1;
+#define fc_vft_hdr_hopct_SHIFT         24
+#define fc_vft_hdr_hopct_MASK          0xFF
+#define fc_vft_hdr_hopct_WORD          word1
+};
+
 /*
  *  Extended Link Service LS_COMMAND codes (Payload Word 0)
  */
@@ -1152,6 +1181,9 @@ typedef struct {
 #define PCI_DEVICE_ID_HORNET        0xfe05
 #define PCI_DEVICE_ID_ZEPHYR_SCSP   0xfe11
 #define PCI_DEVICE_ID_ZEPHYR_DCSP   0xfe12
+#define PCI_VENDOR_ID_SERVERENGINE  0x19a2
+#define PCI_DEVICE_ID_TIGERSHARK    0x0704
+#define PCI_DEVICE_ID_TIGERSHARK_S  0x0705
 
 #define JEDEC_ID_ADDRESS            0x0080001c
 #define FIREFLY_JEDEC_ID            0x1ACC
@@ -1342,15 +1374,21 @@ typedef struct {                /* FireFly BIU registers */
 #define MBX_READ_LA64       0x95
 #define MBX_REG_VPI        0x96
 #define MBX_UNREG_VPI      0x97
-#define MBX_REG_VNPID      0x96
-#define MBX_UNREG_VNPID            0x97
 
 #define MBX_WRITE_WWN       0x98
 #define MBX_SET_DEBUG       0x99
 #define MBX_LOAD_EXP_ROM    0x9C
-
-#define MBX_MAX_CMDS        0x9D
+#define MBX_SLI4_CONFIG            0x9B
+#define MBX_SLI4_REQ_FTRS   0x9D
+#define MBX_MAX_CMDS        0x9E
+#define MBX_RESUME_RPI      0x9E
 #define MBX_SLI2_CMD_MASK   0x80
+#define MBX_REG_VFI         0x9F
+#define MBX_REG_FCFI        0xA0
+#define MBX_UNREG_VFI       0xA1
+#define MBX_UNREG_FCFI     0xA2
+#define MBX_INIT_VFI        0xA3
+#define MBX_INIT_VPI        0xA4
 
 /* IOCB Commands */
 
@@ -1440,6 +1478,16 @@ typedef struct {         /* FireFly BIU registers */
 #define CMD_IOCB_LOGENTRY_CN           0x94
 #define CMD_IOCB_LOGENTRY_ASYNC_CN     0x96
 
+/* Unhandled Data Security SLI Commands */
+#define DSSCMD_IWRITE64_CR             0xD8
+#define DSSCMD_IWRITE64_CX             0xD9
+#define DSSCMD_IREAD64_CR              0xDA
+#define DSSCMD_IREAD64_CX              0xDB
+#define DSSCMD_INVALIDATE_DEK          0xDC
+#define DSSCMD_SET_KEK                 0xDD
+#define DSSCMD_GET_KEK_ID              0xDE
+#define DSSCMD_GEN_XFER                        0xDF
+
 #define CMD_MAX_IOCB_CMD        0xE6
 #define CMD_IOCB_MASK           0xff
 
@@ -1466,6 +1514,7 @@ typedef struct {          /* FireFly BIU registers */
 #define MBXERR_BAD_RCV_LENGTH       14
 #define MBXERR_DMA_ERROR            15
 #define MBXERR_ERROR                16
+#define MBXERR_LINK_DOWN            0x33
 #define MBX_NOT_FINISHED           255
 
 #define MBX_BUSY                   0xffffff /* Attempted cmd to busy Mailbox */
@@ -1504,32 +1553,6 @@ struct ulp_bde {
 #endif
 };
 
-struct ulp_bde64 {     /* SLI-2 */
-       union ULP_BDE_TUS {
-               uint32_t w;
-               struct {
-#ifdef __BIG_ENDIAN_BITFIELD
-                       uint32_t bdeFlags:8;    /* BDE Flags 0 IS A SUPPORTED
-                                                  VALUE !! */
-                       uint32_t bdeSize:24;    /* Size of buffer (in bytes) */
-#else  /*  __LITTLE_ENDIAN_BITFIELD */
-                       uint32_t bdeSize:24;    /* Size of buffer (in bytes) */
-                       uint32_t bdeFlags:8;    /* BDE Flags 0 IS A SUPPORTED
-                                                  VALUE !! */
-#endif
-#define BUFF_TYPE_BDE_64    0x00       /* BDE (Host_resident) */
-#define BUFF_TYPE_BDE_IMMED 0x01       /* Immediate Data BDE */
-#define BUFF_TYPE_BDE_64P   0x02       /* BDE (Port-resident) */
-#define BUFF_TYPE_BDE_64I   0x08       /* Input BDE (Host-resident) */
-#define BUFF_TYPE_BDE_64IP  0x0A       /* Input BDE (Port-resident) */
-#define BUFF_TYPE_BLP_64    0x40       /* BLP (Host-resident) */
-#define BUFF_TYPE_BLP_64P   0x42       /* BLP (Port-resident) */
-               } f;
-       } tus;
-       uint32_t addrLow;
-       uint32_t addrHigh;
-};
-
 typedef struct ULP_BDL {       /* SLI-2 */
 #ifdef __BIG_ENDIAN_BITFIELD
        uint32_t bdeFlags:8;    /* BDL Flags */
@@ -2287,7 +2310,7 @@ typedef struct {
        uint32_t rsvd3;
        uint32_t rsvd4;
        uint32_t rsvd5;
-       uint16_t rsvd6;
+       uint16_t vfi;
        uint16_t vpi;
 #else  /*  __LITTLE_ENDIAN */
        uint32_t rsvd1;
@@ -2297,7 +2320,7 @@ typedef struct {
        uint32_t rsvd4;
        uint32_t rsvd5;
        uint16_t vpi;
-       uint16_t rsvd6;
+       uint16_t vfi;
 #endif
 } REG_VPI_VAR;
 
@@ -2457,7 +2480,7 @@ typedef struct {
        uint32_t entry_index:16;
 #endif
 
-       uint32_t rsvd1;
+       uint32_t sli4_length;
        uint32_t word_cnt;
        uint32_t resp_offset;
 } DUMP_VAR;
@@ -2470,9 +2493,32 @@ typedef struct {
 #define  DMP_RSP_OFFSET          0x14   /* word 5 contains first word of rsp */
 #define  DMP_RSP_SIZE            0x6C   /* maximum of 27 words of rsp data */
 
+#define  DMP_REGION_VPORT       0x16   /* VPort info region */
+#define  DMP_VPORT_REGION_SIZE  0x200
+#define  DMP_MBOX_OFFSET_WORD   0x5
+
+#define  DMP_REGION_FCOEPARAM   0x17   /* fcoe param region */
+#define  DMP_FCOEPARAM_RGN_SIZE         0x400
+
 #define  WAKE_UP_PARMS_REGION_ID    4
 #define  WAKE_UP_PARMS_WORD_SIZE   15
 
+struct vport_rec {
+       uint8_t wwpn[8];
+       uint8_t wwnn[8];
+};
+
+#define VPORT_INFO_SIG 0x32324752
+#define VPORT_INFO_REV_MASK 0xff
+#define VPORT_INFO_REV 0x1
+#define MAX_STATIC_VPORT_COUNT 16
+struct static_vport_info {
+       uint32_t                signature;
+       uint32_t                rev;
+       struct vport_rec        vport_list[MAX_STATIC_VPORT_COUNT];
+       uint32_t                resvd[66];
+};
+
 /* Option rom version structure */
 struct prog_id {
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2697,7 +2743,9 @@ typedef struct {
 #endif
 
 #ifdef __BIG_ENDIAN_BITFIELD
-       uint32_t rsvd1     : 23;  /* Reserved                             */
+       uint32_t rsvd1     : 19;  /* Reserved                             */
+       uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+       uint32_t rsvd2     :  3;  /* Reserved                             */
        uint32_t cbg       :  1;  /* Configure BlockGuard                 */
        uint32_t cmv       :  1;  /* Configure Max VPIs                   */
        uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
@@ -2717,10 +2765,14 @@ typedef struct {
        uint32_t ccrp      :  1;  /* Config Command Ring Polling          */
        uint32_t cmv       :  1;  /* Configure Max VPIs                   */
        uint32_t cbg       :  1;  /* Configure BlockGuard                 */
-       uint32_t rsvd1     : 23;  /* Reserved                             */
+       uint32_t rsvd2     :  3;  /* Reserved                             */
+       uint32_t cdss      :  1;  /* Configure Data Security SLI          */
+       uint32_t rsvd1     : 19;  /* Reserved                             */
 #endif
 #ifdef __BIG_ENDIAN_BITFIELD
-       uint32_t rsvd2     : 23;  /* Reserved                             */
+       uint32_t rsvd3     : 19;  /* Reserved                             */
+       uint32_t gdss      :  1;  /* Configure Data Security SLI          */
+       uint32_t rsvd4     :  3;  /* Reserved                             */
        uint32_t gbg       :  1;  /* Grant BlockGuard                     */
        uint32_t gmv       :  1;  /* Grant Max VPIs                       */
        uint32_t gcrp      :  1;  /* Grant Command Ring Polling           */
@@ -2740,7 +2792,9 @@ typedef struct {
        uint32_t gcrp      :  1;  /* Grant Command Ring Polling           */
        uint32_t gmv       :  1;  /* Grant Max VPIs                       */
        uint32_t gbg       :  1;  /* Grant BlockGuard                     */
-       uint32_t rsvd2     : 23;  /* Reserved                             */
+       uint32_t rsvd4     :  3;  /* Reserved                             */
+       uint32_t gdss      :  1;  /* Configure Data Security SLI          */
+       uint32_t rsvd3     : 19;  /* Reserved                             */
 #endif
 
 #ifdef __BIG_ENDIAN_BITFIELD
@@ -2753,20 +2807,20 @@ typedef struct {
 
 #ifdef __BIG_ENDIAN_BITFIELD
        uint32_t max_hbq   : 16;  /* Max HBQs Host expect to configure    */
-       uint32_t rsvd3     : 16;  /* Max HBQs Host expect to configure    */
+       uint32_t rsvd5     : 16;  /* Max HBQs Host expect to configure    */
 #else  /*  __LITTLE_ENDIAN */
-       uint32_t rsvd3     : 16;  /* Max HBQs Host expect to configure    */
+       uint32_t rsvd5     : 16;  /* Max HBQs Host expect to configure    */
        uint32_t max_hbq   : 16;  /* Max HBQs Host expect to configure    */
 #endif
 
-       uint32_t rsvd4;           /* Reserved                             */
+       uint32_t rsvd6;           /* Reserved                             */
 
 #ifdef __BIG_ENDIAN_BITFIELD
-       uint32_t rsvd5      : 16;  /* Reserved                             */
+       uint32_t rsvd7      : 16;  /* Reserved                             */
        uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
 #else  /*  __LITTLE_ENDIAN */
        uint32_t max_vpi    : 16;  /* Max number of virt N-Ports           */
-       uint32_t rsvd5      : 16;  /* Reserved                             */
+       uint32_t rsvd7      : 16;  /* Reserved                             */
 #endif
 
 } CONFIG_PORT_VAR;
@@ -3666,3 +3720,5 @@ lpfc_error_lost_link(IOCB_t *iocbp)
 #define MENLO_TIMEOUT 30
 #define SETVAR_MLOMNT 0x103107
 #define SETVAR_MLORST 0x103007
+
+#define BPL_ALIGN_SZ 8 /* 8 byte alignment for bpl and mbufs */
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
new file mode 100644
index 0000000..39c34b3
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -0,0 +1,2141 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+/* Macros to deal with bit fields. Each bit field must have 3 #defines
+ * associated with it (_SHIFT, _MASK, and _WORD).
+ * EG. For a bit field that is in the 7th bit of the "field4" field of a
+ * structure and is 2 bits in size the following #defines must exist:
+ *     struct temp {
+ *             uint32_t        field1;
+ *             uint32_t        field2;
+ *             uint32_t        field3;
+ *             uint32_t        field4;
+ *     #define example_bit_field_SHIFT         7
+ *     #define example_bit_field_MASK          0x03
+ *     #define example_bit_field_WORD          field4
+ *             uint32_t        field5;
+ *     };
+ * Then the macros below may be used to get or set the value of that field.
+ * EG. To get the value of the bit field from the above example:
+ *     struct temp t1;
+ *     value = bf_get(example_bit_field, &t1);
+ * And then to set that bit field:
+ *     bf_set(example_bit_field, &t1, 2);
+ * Or clear that bit field:
+ *     bf_set(example_bit_field, &t1, 0);
+ */
+#define bf_get(name, ptr) \
+       (((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)
+#define bf_set(name, ptr, value) \
+       ((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
+                ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
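
To make the convention concrete, a minimal usage sketch against the
fc_vft_header bit fields defined in lpfc_hw.h above (illustrative only):

        struct fc_vft_header vft = { 0, 0 };

        bf_set(fc_vft_hdr_vf_id, &vft, 42);             /* pack VF ID into word0 */
        if (bf_get(fc_vft_hdr_vf_id, &vft) == 42)       /* read it back */
                printk(KERN_DEBUG "vf_id round-trips\n");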
+
+struct dma_address {
+       uint32_t addr_lo;
+       uint32_t addr_hi;
+};
+
+#define LPFC_SLI4_BAR0         1
+#define LPFC_SLI4_BAR1         2
+#define LPFC_SLI4_BAR2         4
+
+#define LPFC_SLI4_MBX_EMBED    true
+#define LPFC_SLI4_MBX_NEMBED   false
+
+#define LPFC_SLI4_MB_WORD_COUNT                64
+#define LPFC_MAX_MQ_PAGE               8
+#define LPFC_MAX_WQ_PAGE               8
+#define LPFC_MAX_CQ_PAGE               4
+#define LPFC_MAX_EQ_PAGE               8
+
+#define LPFC_VIR_FUNC_MAX       32 /* Maximum number of virtual functions */
+#define LPFC_PCI_FUNC_MAX        5 /* Maximum number of PCI functions */
+#define LPFC_VFR_PAGE_SIZE     0x1000 /* 4KB BAR2 per-VF register page size */
+
+/* Define SLI4 Alignment requirements. */
+#define LPFC_ALIGN_16_BYTE     16
+#define LPFC_ALIGN_64_BYTE     64
+
+/* Define SLI4 specific definitions. */
+#define LPFC_MQ_CQE_BYTE_OFFSET        256
+#define LPFC_MBX_CMD_HDR_LENGTH 16
+#define LPFC_MBX_ERROR_RANGE   0x4000
+#define LPFC_BMBX_BIT1_ADDR_HI 0x2
+#define LPFC_BMBX_BIT1_ADDR_LO 0
+#define LPFC_RPI_HDR_COUNT     64
+#define LPFC_HDR_TEMPLATE_SIZE 4096
+#define LPFC_RPI_ALLOC_ERROR   0xFFFF
+#define LPFC_FCF_RECORD_WD_CNT 132
+#define LPFC_ENTIRE_FCF_DATABASE 0
+#define LPFC_DFLT_FCF_INDEX     0
+
+/* Virtual function numbers */
+#define LPFC_VF0               0
+#define LPFC_VF1               1
+#define LPFC_VF2               2
+#define LPFC_VF3               3
+#define LPFC_VF4               4
+#define LPFC_VF5               5
+#define LPFC_VF6               6
+#define LPFC_VF7               7
+#define LPFC_VF8               8
+#define LPFC_VF9               9
+#define LPFC_VF10              10
+#define LPFC_VF11              11
+#define LPFC_VF12              12
+#define LPFC_VF13              13
+#define LPFC_VF14              14
+#define LPFC_VF15              15
+#define LPFC_VF16              16
+#define LPFC_VF17              17
+#define LPFC_VF18              18
+#define LPFC_VF19              19
+#define LPFC_VF20              20
+#define LPFC_VF21              21
+#define LPFC_VF22              22
+#define LPFC_VF23              23
+#define LPFC_VF24              24
+#define LPFC_VF25              25
+#define LPFC_VF26              26
+#define LPFC_VF27              27
+#define LPFC_VF28              28
+#define LPFC_VF29              29
+#define LPFC_VF30              30
+#define LPFC_VF31              31
+
+/* PCI function numbers */
+#define LPFC_PCI_FUNC0         0
+#define LPFC_PCI_FUNC1         1
+#define LPFC_PCI_FUNC2         2
+#define LPFC_PCI_FUNC3         3
+#define LPFC_PCI_FUNC4         4
+
+/* Active interrupt test count */
+#define LPFC_ACT_INTR_CNT      4
+
+/* Delay Multiplier constant */
+#define LPFC_DMULT_CONST       651042
+#define LPFC_MIM_IMAX          636
+#define LPFC_FP_DEF_IMAX       10000
+#define LPFC_SP_DEF_IMAX       10000
+
+struct ulp_bde64 {
+       union ULP_BDE_TUS {
+               uint32_t w;
+               struct {
+#ifdef __BIG_ENDIAN_BITFIELD
+                       uint32_t bdeFlags:8;    /* BDE Flags 0 IS A SUPPORTED
+                                                  VALUE !! */
+                       uint32_t bdeSize:24;    /* Size of buffer (in bytes) */
+#else  /*  __LITTLE_ENDIAN_BITFIELD */
+                       uint32_t bdeSize:24;    /* Size of buffer (in bytes) */
+                       uint32_t bdeFlags:8;    /* BDE Flags 0 IS A SUPPORTED
+                                                  VALUE !! */
+#endif
+#define BUFF_TYPE_BDE_64    0x00       /* BDE (Host_resident) */
+#define BUFF_TYPE_BDE_IMMED 0x01       /* Immediate Data BDE */
+#define BUFF_TYPE_BDE_64P   0x02       /* BDE (Port-resident) */
+#define BUFF_TYPE_BDE_64I   0x08       /* Input BDE (Host-resident) */
+#define BUFF_TYPE_BDE_64IP  0x0A       /* Input BDE (Port-resident) */
+#define BUFF_TYPE_BLP_64    0x40       /* BLP (Host-resident) */
+#define BUFF_TYPE_BLP_64P   0x42       /* BLP (Port-resident) */
+               } f;
+       } tus;
+       uint32_t addrLow;
+       uint32_t addrHigh;
+};
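+
+/*
+ * Illustrative sketch, not part of the hardware definitions above: filling
+ * a 64-bit BDE from a DMA mapping.  The function name is hypothetical, and
+ * putPaddrLow()/putPaddrHigh() are assumed to be the address-splitting
+ * helpers from lpfc_hw.h.
+ */
+static inline void
+lpfc_sketch_fill_bde64(struct ulp_bde64 *bde, dma_addr_t paddr, uint32_t len)
+{
+        bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64; /* host-resident buffer */
+        bde->tus.f.bdeSize = len;               /* 24-bit byte count */
+        bde->addrLow = putPaddrLow(paddr);
+        bde->addrHigh = putPaddrHigh(paddr);
+}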
+
+struct lpfc_sli4_flags {
+       uint32_t word0;
+#define lpfc_fip_flag_SHIFT 0
+#define lpfc_fip_flag_MASK 0x00000001
+#define lpfc_fip_flag_WORD word0
+};
+
+/* event queue entry structure */
+struct lpfc_eqe {
+       uint32_t word0;
+#define lpfc_eqe_resource_id_SHIFT     16
+#define lpfc_eqe_resource_id_MASK      0x000000FF
+#define lpfc_eqe_resource_id_WORD      word0
+#define lpfc_eqe_minor_code_SHIFT      4
+#define lpfc_eqe_minor_code_MASK       0x00000FFF
+#define lpfc_eqe_minor_code_WORD       word0
+#define lpfc_eqe_major_code_SHIFT      1
+#define lpfc_eqe_major_code_MASK       0x00000007
+#define lpfc_eqe_major_code_WORD       word0
+#define lpfc_eqe_valid_SHIFT           0
+#define lpfc_eqe_valid_MASK            0x00000001
+#define lpfc_eqe_valid_WORD            word0
+};
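+
+/*
+ * Illustrative sketch: each _SHIFT/_MASK/_WORD triplet above is meant to be
+ * consumed through the token-pasting bf_get()/bf_set() accessors defined
+ * earlier in this header, e.g. bf_get(lpfc_eqe_valid, eqe) expands to
+ * ((eqe)->word0 >> lpfc_eqe_valid_SHIFT) & lpfc_eqe_valid_MASK.  The
+ * function name below is hypothetical.
+ */
+static inline int
+lpfc_sketch_eqe_is_valid(struct lpfc_eqe *eqe)
+{
+        return bf_get(lpfc_eqe_valid, eqe);
+}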
+
+/* completion queue entry structure (common fields for all cqe types) */
+struct lpfc_cqe {
+       uint32_t reserved0;
+       uint32_t reserved1;
+       uint32_t reserved2;
+       uint32_t word3;
+#define lpfc_cqe_valid_SHIFT           31
+#define lpfc_cqe_valid_MASK            0x00000001
+#define lpfc_cqe_valid_WORD            word3
+#define lpfc_cqe_code_SHIFT            16
+#define lpfc_cqe_code_MASK             0x000000FF
+#define lpfc_cqe_code_WORD             word3
+};
+
+/* Completion Queue Entry Status Codes */
+#define CQE_STATUS_SUCCESS             0x0
+#define CQE_STATUS_FCP_RSP_FAILURE     0x1
+#define CQE_STATUS_REMOTE_STOP         0x2
+#define CQE_STATUS_LOCAL_REJECT                0x3
+#define CQE_STATUS_NPORT_RJT           0x4
+#define CQE_STATUS_FABRIC_RJT          0x5
+#define CQE_STATUS_NPORT_BSY           0x6
+#define CQE_STATUS_FABRIC_BSY          0x7
+#define CQE_STATUS_INTERMED_RSP                0x8
+#define CQE_STATUS_LS_RJT              0x9
+#define CQE_STATUS_CMD_REJECT          0xb
+#define CQE_STATUS_FCP_TGT_LENCHECK    0xc
+#define CQE_STATUS_NEED_BUFF_ENTRY     0xf
+
+/* Status returned by hardware (valid only if status = CQE_STATUS_SUCCESS). */
+#define CQE_HW_STATUS_NO_ERR           0x0
+#define CQE_HW_STATUS_UNDERRUN         0x1
+#define CQE_HW_STATUS_OVERRUN          0x2
+
+/* Completion Queue Entry Codes */
+#define CQE_CODE_COMPL_WQE             0x1
+#define CQE_CODE_RELEASE_WQE           0x2
+#define CQE_CODE_RECEIVE               0x4
+#define CQE_CODE_XRI_ABORTED           0x5
+
+/* completion queue entry for wqe completions */
+struct lpfc_wcqe_complete {
+       uint32_t word0;
+#define lpfc_wcqe_c_request_tag_SHIFT  16
+#define lpfc_wcqe_c_request_tag_MASK   0x0000FFFF
+#define lpfc_wcqe_c_request_tag_WORD   word0
+#define lpfc_wcqe_c_status_SHIFT       8
+#define lpfc_wcqe_c_status_MASK                0x000000FF
+#define lpfc_wcqe_c_status_WORD                word0
+#define lpfc_wcqe_c_hw_status_SHIFT    0
+#define lpfc_wcqe_c_hw_status_MASK     0x000000FF
+#define lpfc_wcqe_c_hw_status_WORD     word0
+       uint32_t total_data_placed;
+       uint32_t parameter;
+       uint32_t word3;
+#define lpfc_wcqe_c_valid_SHIFT                lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_c_valid_MASK         lpfc_cqe_valid_MASK
+#define lpfc_wcqe_c_valid_WORD         lpfc_cqe_valid_WORD
+#define lpfc_wcqe_c_xb_SHIFT           28
+#define lpfc_wcqe_c_xb_MASK            0x00000001
+#define lpfc_wcqe_c_xb_WORD            word3
+#define lpfc_wcqe_c_pv_SHIFT           27
+#define lpfc_wcqe_c_pv_MASK            0x00000001
+#define lpfc_wcqe_c_pv_WORD            word3
+#define lpfc_wcqe_c_priority_SHIFT     24
+#define lpfc_wcqe_c_priority_MASK              0x00000007
+#define lpfc_wcqe_c_priority_WORD              word3
+#define lpfc_wcqe_c_code_SHIFT         lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_c_code_MASK          lpfc_cqe_code_MASK
+#define lpfc_wcqe_c_code_WORD          lpfc_cqe_code_WORD
+};
+
+/* completion queue entry for wqe release */
+struct lpfc_wcqe_release {
+       uint32_t reserved0;
+       uint32_t reserved1;
+       uint32_t word2;
+#define lpfc_wcqe_r_wq_id_SHIFT                16
+#define lpfc_wcqe_r_wq_id_MASK         0x0000FFFF
+#define lpfc_wcqe_r_wq_id_WORD         word2
+#define lpfc_wcqe_r_wqe_index_SHIFT    0
+#define lpfc_wcqe_r_wqe_index_MASK     0x0000FFFF
+#define lpfc_wcqe_r_wqe_index_WORD     word2
+       uint32_t word3;
+#define lpfc_wcqe_r_valid_SHIFT                lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_r_valid_MASK         lpfc_cqe_valid_MASK
+#define lpfc_wcqe_r_valid_WORD         lpfc_cqe_valid_WORD
+#define lpfc_wcqe_r_code_SHIFT         lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_r_code_MASK          lpfc_cqe_code_MASK
+#define lpfc_wcqe_r_code_WORD          lpfc_cqe_code_WORD
+};
+
+struct sli4_wcqe_xri_aborted {
+       uint32_t word0;
+#define lpfc_wcqe_xa_status_SHIFT              8
+#define lpfc_wcqe_xa_status_MASK               0x000000FF
+#define lpfc_wcqe_xa_status_WORD               word0
+       uint32_t parameter;
+       uint32_t word2;
+#define lpfc_wcqe_xa_remote_xid_SHIFT  16
+#define lpfc_wcqe_xa_remote_xid_MASK   0x0000FFFF
+#define lpfc_wcqe_xa_remote_xid_WORD   word2
+#define lpfc_wcqe_xa_xri_SHIFT         0
+#define lpfc_wcqe_xa_xri_MASK          0x0000FFFF
+#define lpfc_wcqe_xa_xri_WORD          word2
+       uint32_t word3;
+#define lpfc_wcqe_xa_valid_SHIFT       lpfc_cqe_valid_SHIFT
+#define lpfc_wcqe_xa_valid_MASK                lpfc_cqe_valid_MASK
+#define lpfc_wcqe_xa_valid_WORD                lpfc_cqe_valid_WORD
+#define lpfc_wcqe_xa_ia_SHIFT          30
+#define lpfc_wcqe_xa_ia_MASK           0x00000001
+#define lpfc_wcqe_xa_ia_WORD           word3
+#define CQE_XRI_ABORTED_IA_REMOTE      0
+#define CQE_XRI_ABORTED_IA_LOCAL       1
+#define lpfc_wcqe_xa_br_SHIFT          29
+#define lpfc_wcqe_xa_br_MASK           0x00000001
+#define lpfc_wcqe_xa_br_WORD           word3
+#define CQE_XRI_ABORTED_BR_BA_ACC      0
+#define CQE_XRI_ABORTED_BR_BA_RJT      1
+#define lpfc_wcqe_xa_eo_SHIFT          28
+#define lpfc_wcqe_xa_eo_MASK           0x00000001
+#define lpfc_wcqe_xa_eo_WORD           word3
+#define CQE_XRI_ABORTED_EO_REMOTE      0
+#define CQE_XRI_ABORTED_EO_LOCAL       1
+#define lpfc_wcqe_xa_code_SHIFT                lpfc_cqe_code_SHIFT
+#define lpfc_wcqe_xa_code_MASK         lpfc_cqe_code_MASK
+#define lpfc_wcqe_xa_code_WORD         lpfc_cqe_code_WORD
+};
+
+/* completion queue entry structure for rqe completion */
+struct lpfc_rcqe {
+       uint32_t word0;
+#define lpfc_rcqe_bindex_SHIFT         16
+#define lpfc_rcqe_bindex_MASK          0x00000FFF
+#define lpfc_rcqe_bindex_WORD          word0
+#define lpfc_rcqe_status_SHIFT         8
+#define lpfc_rcqe_status_MASK          0x000000FF
+#define lpfc_rcqe_status_WORD          word0
+#define FC_STATUS_RQ_SUCCESS           0x10 /* Async receive successful */
+#define FC_STATUS_RQ_BUF_LEN_EXCEEDED  0x11 /* payload truncated */
+#define FC_STATUS_INSUFF_BUF_NEED_BUF  0x12 /* Insufficient buffers */
+#define FC_STATUS_INSUFF_BUF_FRM_DISC  0x13 /* Frame Discard */
+       uint32_t reserved1;
+       uint32_t word2;
+#define lpfc_rcqe_length_SHIFT         16
+#define lpfc_rcqe_length_MASK          0x0000FFFF
+#define lpfc_rcqe_length_WORD          word2
+#define lpfc_rcqe_rq_id_SHIFT          6
+#define lpfc_rcqe_rq_id_MASK           0x000003FF
+#define lpfc_rcqe_rq_id_WORD           word2
+#define lpfc_rcqe_fcf_id_SHIFT         0
+#define lpfc_rcqe_fcf_id_MASK          0x0000003F
+#define lpfc_rcqe_fcf_id_WORD          word2
+       uint32_t word3;
+#define lpfc_rcqe_valid_SHIFT          lpfc_cqe_valid_SHIFT
+#define lpfc_rcqe_valid_MASK           lpfc_cqe_valid_MASK
+#define lpfc_rcqe_valid_WORD           lpfc_cqe_valid_WORD
+#define lpfc_rcqe_port_SHIFT           30
+#define lpfc_rcqe_port_MASK            0x00000001
+#define lpfc_rcqe_port_WORD            word3
+#define lpfc_rcqe_hdr_length_SHIFT     24
+#define lpfc_rcqe_hdr_length_MASK      0x0000001F
+#define lpfc_rcqe_hdr_length_WORD      word3
+#define lpfc_rcqe_code_SHIFT           lpfc_cqe_code_SHIFT
+#define lpfc_rcqe_code_MASK            lpfc_cqe_code_MASK
+#define lpfc_rcqe_code_WORD            lpfc_cqe_code_WORD
+#define lpfc_rcqe_eof_SHIFT            8
+#define lpfc_rcqe_eof_MASK             0x000000FF
+#define lpfc_rcqe_eof_WORD             word3
+#define FCOE_EOFn      0x41
+#define FCOE_EOFt      0x42
+#define FCOE_EOFni     0x49
+#define FCOE_EOFa      0x50
+#define lpfc_rcqe_sof_SHIFT            0
+#define lpfc_rcqe_sof_MASK             0x000000FF
+#define lpfc_rcqe_sof_WORD             word3
+#define FCOE_SOFi2     0x2d
+#define FCOE_SOFi3     0x2e
+#define FCOE_SOFn2     0x35
+#define FCOE_SOFn3     0x36
+};
+
+struct lpfc_wqe_generic {
+       struct ulp_bde64 bde;
+       uint32_t word3;
+       uint32_t word4;
+       uint32_t word5;
+       uint32_t word6;
+#define lpfc_wqe_gen_context_SHIFT     16
+#define lpfc_wqe_gen_context_MASK      0x0000FFFF
+#define lpfc_wqe_gen_context_WORD      word6
+#define lpfc_wqe_gen_xri_SHIFT         0
+#define lpfc_wqe_gen_xri_MASK          0x0000FFFF
+#define lpfc_wqe_gen_xri_WORD          word6
+       uint32_t word7;
+#define lpfc_wqe_gen_lnk_SHIFT         23
+#define lpfc_wqe_gen_lnk_MASK          0x00000001
+#define lpfc_wqe_gen_lnk_WORD          word7
+#define lpfc_wqe_gen_erp_SHIFT         22
+#define lpfc_wqe_gen_erp_MASK          0x00000001
+#define lpfc_wqe_gen_erp_WORD          word7
+#define lpfc_wqe_gen_pu_SHIFT          20
+#define lpfc_wqe_gen_pu_MASK           0x00000003
+#define lpfc_wqe_gen_pu_WORD           word7
+#define lpfc_wqe_gen_class_SHIFT       16
+#define lpfc_wqe_gen_class_MASK                0x00000007
+#define lpfc_wqe_gen_class_WORD                word7
+#define lpfc_wqe_gen_command_SHIFT     8
+#define lpfc_wqe_gen_command_MASK      0x000000FF
+#define lpfc_wqe_gen_command_WORD      word7
+#define lpfc_wqe_gen_status_SHIFT      4
+#define lpfc_wqe_gen_status_MASK       0x0000000F
+#define lpfc_wqe_gen_status_WORD       word7
+#define lpfc_wqe_gen_ct_SHIFT          2
+#define lpfc_wqe_gen_ct_MASK           0x00000007
+#define lpfc_wqe_gen_ct_WORD           word7
+       uint32_t abort_tag;
+       uint32_t word9;
+#define lpfc_wqe_gen_request_tag_SHIFT 0
+#define lpfc_wqe_gen_request_tag_MASK  0x0000FFFF
+#define lpfc_wqe_gen_request_tag_WORD  word9
+       uint32_t word10;
+#define lpfc_wqe_gen_ccp_SHIFT         24
+#define lpfc_wqe_gen_ccp_MASK          0x000000FF
+#define lpfc_wqe_gen_ccp_WORD          word10
+#define lpfc_wqe_gen_ccpe_SHIFT                23
+#define lpfc_wqe_gen_ccpe_MASK         0x00000001
+#define lpfc_wqe_gen_ccpe_WORD         word10
+#define lpfc_wqe_gen_pv_SHIFT          19
+#define lpfc_wqe_gen_pv_MASK           0x00000001
+#define lpfc_wqe_gen_pv_WORD           word10
+#define lpfc_wqe_gen_pri_SHIFT         16
+#define lpfc_wqe_gen_pri_MASK          0x00000007
+#define lpfc_wqe_gen_pri_WORD          word10
+       uint32_t word11;
+#define lpfc_wqe_gen_cq_id_SHIFT       16
+#define lpfc_wqe_gen_cq_id_MASK                0x000003FF
+#define lpfc_wqe_gen_cq_id_WORD                word11
+#define LPFC_WQE_CQ_ID_DEFAULT 0x3ff
+#define lpfc_wqe_gen_wqec_SHIFT                7
+#define lpfc_wqe_gen_wqec_MASK         0x00000001
+#define lpfc_wqe_gen_wqec_WORD         word11
+#define lpfc_wqe_gen_cmd_type_SHIFT    0
+#define lpfc_wqe_gen_cmd_type_MASK     0x0000000F
+#define lpfc_wqe_gen_cmd_type_WORD     word11
+       uint32_t payload[4];
+};
+
+struct lpfc_rqe {
+       uint32_t address_hi;
+       uint32_t address_lo;
+};
+
+/* buffer descriptors */
+struct lpfc_bde4 {
+       uint32_t addr_hi;
+       uint32_t addr_lo;
+       uint32_t word2;
+#define lpfc_bde4_last_SHIFT           31
+#define lpfc_bde4_last_MASK            0x00000001
+#define lpfc_bde4_last_WORD            word2
+#define lpfc_bde4_sge_offset_SHIFT     0
+#define lpfc_bde4_sge_offset_MASK      0x000003FF
+#define lpfc_bde4_sge_offset_WORD      word2
+       uint32_t word3;
+#define lpfc_bde4_length_SHIFT         0
+#define lpfc_bde4_length_MASK          0x000000FF
+#define lpfc_bde4_length_WORD          word3
+};
+
+struct lpfc_register {
+       uint32_t word0;
+};
+
+#define LPFC_UERR_STATUS_HI            0x00A4
+#define LPFC_UERR_STATUS_LO            0x00A0
+#define LPFC_ONLINE0                   0x00B0
+#define LPFC_ONLINE1                   0x00B4
+#define LPFC_SCRATCHPAD                        0x0058
+
+/* BAR0 Registers */
+#define LPFC_HST_STATE                 0x00AC
+#define lpfc_hst_state_perr_SHIFT      31
+#define lpfc_hst_state_perr_MASK       0x1
+#define lpfc_hst_state_perr_WORD       word0
+#define lpfc_hst_state_sfi_SHIFT       30
+#define lpfc_hst_state_sfi_MASK                0x1
+#define lpfc_hst_state_sfi_WORD                word0
+#define lpfc_hst_state_nip_SHIFT       29
+#define lpfc_hst_state_nip_MASK                0x1
+#define lpfc_hst_state_nip_WORD                word0
+#define lpfc_hst_state_ipc_SHIFT       28
+#define lpfc_hst_state_ipc_MASK                0x1
+#define lpfc_hst_state_ipc_WORD                word0
+#define lpfc_hst_state_xrom_SHIFT      27
+#define lpfc_hst_state_xrom_MASK       0x1
+#define lpfc_hst_state_xrom_WORD       word0
+#define lpfc_hst_state_dl_SHIFT                26
+#define lpfc_hst_state_dl_MASK         0x1
+#define lpfc_hst_state_dl_WORD         word0
+#define lpfc_hst_state_port_status_SHIFT       0
+#define lpfc_hst_state_port_status_MASK                0xFFFF
+#define lpfc_hst_state_port_status_WORD                word0
+
+#define LPFC_POST_STAGE_POWER_ON_RESET                 0x0000
+#define LPFC_POST_STAGE_AWAITING_HOST_RDY              0x0001
+#define LPFC_POST_STAGE_HOST_RDY                       0x0002
+#define LPFC_POST_STAGE_BE_RESET                       0x0003
+#define LPFC_POST_STAGE_SEEPROM_CS_START               0x0100
+#define LPFC_POST_STAGE_SEEPROM_CS_DONE                        0x0101
+#define LPFC_POST_STAGE_DDR_CONFIG_START               0x0200
+#define LPFC_POST_STAGE_DDR_CONFIG_DONE                        0x0201
+#define LPFC_POST_STAGE_DDR_CALIBRATE_START            0x0300
+#define LPFC_POST_STAGE_DDR_CALIBRATE_DONE             0x0301
+#define LPFC_POST_STAGE_DDR_TEST_START                 0x0400
+#define LPFC_POST_STAGE_DDR_TEST_DONE                  0x0401
+#define LPFC_POST_STAGE_REDBOOT_INIT_START             0x0600
+#define LPFC_POST_STAGE_REDBOOT_INIT_DONE              0x0601
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_START            0x0700
+#define LPFC_POST_STAGE_FW_IMAGE_LOAD_DONE             0x0701
+#define LPFC_POST_STAGE_ARMFW_START                    0x0800
+#define LPFC_POST_STAGE_DHCP_QUERY_START               0x0900
+#define LPFC_POST_STAGE_DHCP_QUERY_DONE                        0x0901
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_START    0x0A00
+#define LPFC_POST_STAGE_BOOT_TARGET_DISCOVERY_DONE     0x0A01
+#define LPFC_POST_STAGE_RC_OPTION_SET                  0x0B00
+#define LPFC_POST_STAGE_SWITCH_LINK                    0x0B01
+#define LPFC_POST_STAGE_SEND_ICDS_MESSAGE              0x0B02
+#define LPFC_POST_STAGE_PERFORM_TFTP                   0x0B03
+#define LPFC_POST_STAGE_PARSE_XML                      0x0B04
+#define LPFC_POST_STAGE_DOWNLOAD_IMAGE                 0x0B05
+#define LPFC_POST_STAGE_FLASH_IMAGE                    0x0B06
+#define LPFC_POST_STAGE_RC_DONE                                0x0B07
+#define LPFC_POST_STAGE_REBOOT_SYSTEM                  0x0B08
+#define LPFC_POST_STAGE_MAC_ADDRESS                    0x0C00
+#define LPFC_POST_STAGE_ARMFW_READY                    0xC000
+#define LPFC_POST_STAGE_ARMFW_UE                       0xF000
+
+#define lpfc_scratchpad_slirev_SHIFT                   4
+#define lpfc_scratchpad_slirev_MASK                    0xF
+#define lpfc_scratchpad_slirev_WORD                    word0
+#define lpfc_scratchpad_chiptype_SHIFT                 8
+#define lpfc_scratchpad_chiptype_MASK                  0xFF
+#define lpfc_scratchpad_chiptype_WORD                  word0
+#define lpfc_scratchpad_featurelevel1_SHIFT            16
+#define lpfc_scratchpad_featurelevel1_MASK             0xFF
+#define lpfc_scratchpad_featurelevel1_WORD             word0
+#define lpfc_scratchpad_featurelevel2_SHIFT            24
+#define lpfc_scratchpad_featurelevel2_MASK             0xFF
+#define lpfc_scratchpad_featurelevel2_WORD             word0
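+
+/*
+ * Illustrative sketch: decoding the SLI revision from a SCRATCHPAD read.
+ * readl() is the standard kernel MMIO accessor; the function name and the
+ * bar0 parameter are hypothetical.
+ */
+static inline uint32_t
+lpfc_sketch_sli_rev(void __iomem *bar0)
+{
+        struct lpfc_register scratchpad;
+
+        scratchpad.word0 = readl(bar0 + LPFC_SCRATCHPAD);
+        return bf_get(lpfc_scratchpad_slirev, &scratchpad);
+}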
+
+/* BAR1 Registers */
+#define LPFC_IMR_MASK_ALL      0xFFFFFFFF
+#define LPFC_ISCR_CLEAR_ALL    0xFFFFFFFF
+
+#define LPFC_HST_ISR0          0x0C18
+#define LPFC_HST_ISR1          0x0C1C
+#define LPFC_HST_ISR2          0x0C20
+#define LPFC_HST_ISR3          0x0C24
+#define LPFC_HST_ISR4          0x0C28
+
+#define LPFC_HST_IMR0          0x0C48
+#define LPFC_HST_IMR1          0x0C4C
+#define LPFC_HST_IMR2          0x0C50
+#define LPFC_HST_IMR3          0x0C54
+#define LPFC_HST_IMR4          0x0C58
+
+#define LPFC_HST_ISCR0         0x0C78
+#define LPFC_HST_ISCR1         0x0C7C
+#define LPFC_HST_ISCR2         0x0C80
+#define LPFC_HST_ISCR3         0x0C84
+#define LPFC_HST_ISCR4         0x0C88
+
+#define LPFC_SLI4_INTR0                        BIT0
+#define LPFC_SLI4_INTR1                        BIT1
+#define LPFC_SLI4_INTR2                        BIT2
+#define LPFC_SLI4_INTR3                        BIT3
+#define LPFC_SLI4_INTR4                        BIT4
+#define LPFC_SLI4_INTR5                        BIT5
+#define LPFC_SLI4_INTR6                        BIT6
+#define LPFC_SLI4_INTR7                        BIT7
+#define LPFC_SLI4_INTR8                        BIT8
+#define LPFC_SLI4_INTR9                        BIT9
+#define LPFC_SLI4_INTR10               BIT10
+#define LPFC_SLI4_INTR11               BIT11
+#define LPFC_SLI4_INTR12               BIT12
+#define LPFC_SLI4_INTR13               BIT13
+#define LPFC_SLI4_INTR14               BIT14
+#define LPFC_SLI4_INTR15               BIT15
+#define LPFC_SLI4_INTR16               BIT16
+#define LPFC_SLI4_INTR17               BIT17
+#define LPFC_SLI4_INTR18               BIT18
+#define LPFC_SLI4_INTR19               BIT19
+#define LPFC_SLI4_INTR20               BIT20
+#define LPFC_SLI4_INTR21               BIT21
+#define LPFC_SLI4_INTR22               BIT22
+#define LPFC_SLI4_INTR23               BIT23
+#define LPFC_SLI4_INTR24               BIT24
+#define LPFC_SLI4_INTR25               BIT25
+#define LPFC_SLI4_INTR26               BIT26
+#define LPFC_SLI4_INTR27               BIT27
+#define LPFC_SLI4_INTR28               BIT28
+#define LPFC_SLI4_INTR29               BIT29
+#define LPFC_SLI4_INTR30               BIT30
+#define LPFC_SLI4_INTR31               BIT31
+
+/* BAR2 Registers */
+#define LPFC_RQ_DOORBELL               0x00A0
+#define lpfc_rq_doorbell_num_posted_SHIFT      16
+#define lpfc_rq_doorbell_num_posted_MASK       0x3FFF
+#define lpfc_rq_doorbell_num_posted_WORD       word0
+#define LPFC_RQ_POST_BATCH             8       /* RQEs to post at one time */
+#define lpfc_rq_doorbell_id_SHIFT              0
+#define lpfc_rq_doorbell_id_MASK               0x03FF
+#define lpfc_rq_doorbell_id_WORD               word0
+
+#define LPFC_WQ_DOORBELL               0x0040
+#define lpfc_wq_doorbell_num_posted_SHIFT      24
+#define lpfc_wq_doorbell_num_posted_MASK       0x00FF
+#define lpfc_wq_doorbell_num_posted_WORD       word0
+#define lpfc_wq_doorbell_index_SHIFT           16
+#define lpfc_wq_doorbell_index_MASK            0x00FF
+#define lpfc_wq_doorbell_index_WORD            word0
+#define lpfc_wq_doorbell_id_SHIFT              0
+#define lpfc_wq_doorbell_id_MASK               0xFFFF
+#define lpfc_wq_doorbell_id_WORD               word0
+
+#define LPFC_EQCQ_DOORBELL             0x0120
+#define lpfc_eqcq_doorbell_arm_SHIFT           29
+#define lpfc_eqcq_doorbell_arm_MASK            0x0001
+#define lpfc_eqcq_doorbell_arm_WORD            word0
+#define lpfc_eqcq_doorbell_num_released_SHIFT  16
+#define lpfc_eqcq_doorbell_num_released_MASK   0x1FFF
+#define lpfc_eqcq_doorbell_num_released_WORD   word0
+#define lpfc_eqcq_doorbell_qt_SHIFT            10
+#define lpfc_eqcq_doorbell_qt_MASK             0x0001
+#define lpfc_eqcq_doorbell_qt_WORD             word0
+#define LPFC_QUEUE_TYPE_COMPLETION     0
+#define LPFC_QUEUE_TYPE_EVENT          1
+#define lpfc_eqcq_doorbell_eqci_SHIFT          9
+#define lpfc_eqcq_doorbell_eqci_MASK           0x0001
+#define lpfc_eqcq_doorbell_eqci_WORD           word0
+#define lpfc_eqcq_doorbell_cqid_SHIFT          0
+#define lpfc_eqcq_doorbell_cqid_MASK           0x03FF
+#define lpfc_eqcq_doorbell_cqid_WORD           word0
+#define lpfc_eqcq_doorbell_eqid_SHIFT          0
+#define lpfc_eqcq_doorbell_eqid_MASK           0x01FF
+#define lpfc_eqcq_doorbell_eqid_WORD           word0
+
+#define LPFC_BMBX                      0x0160
+#define lpfc_bmbx_addr_SHIFT           2
+#define lpfc_bmbx_addr_MASK            0x3FFFFFFF
+#define lpfc_bmbx_addr_WORD            word0
+#define lpfc_bmbx_hi_SHIFT             1
+#define lpfc_bmbx_hi_MASK              0x0001
+#define lpfc_bmbx_hi_WORD              word0
+#define lpfc_bmbx_rdy_SHIFT            0
+#define lpfc_bmbx_rdy_MASK             0x0001
+#define lpfc_bmbx_rdy_WORD             word0
+
+#define LPFC_MQ_DOORBELL                       0x0140
+#define lpfc_mq_doorbell_num_posted_SHIFT      16
+#define lpfc_mq_doorbell_num_posted_MASK       0x3FFF
+#define lpfc_mq_doorbell_num_posted_WORD       word0
+#define lpfc_mq_doorbell_id_SHIFT              0
+#define lpfc_mq_doorbell_id_MASK               0x03FF
+#define lpfc_mq_doorbell_id_WORD               word0
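+
+/*
+ * Illustrative sketch: ringing the WQ doorbell after posting one WQE.  The
+ * doorbell word is assembled with the accessors above and written to
+ * BAR2 + LPFC_WQ_DOORBELL; the function name and bar2 parameter are
+ * hypothetical.
+ */
+static inline void
+lpfc_sketch_ring_wq_db(void __iomem *bar2, uint16_t wq_id, uint8_t host_index)
+{
+        struct lpfc_register doorbell;
+
+        doorbell.word0 = 0;
+        bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+        bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+        bf_set(lpfc_wq_doorbell_id, &doorbell, wq_id);
+        writel(doorbell.word0, bar2 + LPFC_WQ_DOORBELL);
+}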
+
+struct lpfc_sli4_cfg_mhdr {
+       uint32_t word1;
+#define lpfc_mbox_hdr_emb_SHIFT                0
+#define lpfc_mbox_hdr_emb_MASK         0x00000001
+#define lpfc_mbox_hdr_emb_WORD         word1
+#define lpfc_mbox_hdr_sge_cnt_SHIFT    3
+#define lpfc_mbox_hdr_sge_cnt_MASK     0x0000001F
+#define lpfc_mbox_hdr_sge_cnt_WORD     word1
+       uint32_t payload_length;
+       uint32_t tag_lo;
+       uint32_t tag_hi;
+       uint32_t reserved5;
+};
+
+union lpfc_sli4_cfg_shdr {
+       struct {
+               uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT             0
+#define lpfc_mbox_hdr_opcode_MASK              0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD              word6
+#define lpfc_mbox_hdr_subsystem_SHIFT          8
+#define lpfc_mbox_hdr_subsystem_MASK           0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD           word6
+#define lpfc_mbox_hdr_port_number_SHIFT                16
+#define lpfc_mbox_hdr_port_number_MASK         0x000000FF
+#define lpfc_mbox_hdr_port_number_WORD         word6
+#define lpfc_mbox_hdr_domain_SHIFT             24
+#define lpfc_mbox_hdr_domain_MASK              0x000000FF
+#define lpfc_mbox_hdr_domain_WORD              word6
+               uint32_t timeout;
+               uint32_t request_length;
+               uint32_t reserved9;
+       } request;
+       struct {
+               uint32_t word6;
+#define lpfc_mbox_hdr_opcode_SHIFT             0
+#define lpfc_mbox_hdr_opcode_MASK              0x000000FF
+#define lpfc_mbox_hdr_opcode_WORD              word6
+#define lpfc_mbox_hdr_subsystem_SHIFT          8
+#define lpfc_mbox_hdr_subsystem_MASK           0x000000FF
+#define lpfc_mbox_hdr_subsystem_WORD           word6
+#define lpfc_mbox_hdr_domain_SHIFT             24
+#define lpfc_mbox_hdr_domain_MASK              0x000000FF
+#define lpfc_mbox_hdr_domain_WORD              word6
+               uint32_t word7;
+#define lpfc_mbox_hdr_status_SHIFT             0
+#define lpfc_mbox_hdr_status_MASK              0x000000FF
+#define lpfc_mbox_hdr_status_WORD              word7
+#define lpfc_mbox_hdr_add_status_SHIFT         8
+#define lpfc_mbox_hdr_add_status_MASK          0x000000FF
+#define lpfc_mbox_hdr_add_status_WORD          word7
+               uint32_t response_length;
+               uint32_t actual_response_length;
+       } response;
+};
+
+/* Mailbox structures */
+struct mbox_header {
+       struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+       union  lpfc_sli4_cfg_shdr cfg_shdr;
+};
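+
+/*
+ * Illustrative sketch: once a SLI_CONFIG mailbox completes, the generic
+ * status lives in the response half of the shared header.  A hypothetical
+ * helper checking it:
+ */
+static inline int
+lpfc_sketch_mbox_failed(struct mbox_header *hdr)
+{
+        return bf_get(lpfc_mbox_hdr_status, &hdr->cfg_shdr.response) ||
+               bf_get(lpfc_mbox_hdr_add_status, &hdr->cfg_shdr.response);
+}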
+
+/* Subsystem Definitions */
+#define LPFC_MBOX_SUBSYSTEM_COMMON     0x1
+#define LPFC_MBOX_SUBSYSTEM_FCOE       0xC
+
+/* Device Specific Definitions */
+
+/* The HOST ENDIAN defines are in Big Endian format. */
+#define HOST_ENDIAN_LOW_WORD0  0xFF3412FF
+#define HOST_ENDIAN_HIGH_WORD1 0xFF7856FF
+
+/* Common Opcodes */
+#define LPFC_MBOX_OPCODE_CQ_CREATE             0x0C
+#define LPFC_MBOX_OPCODE_EQ_CREATE             0x0D
+#define LPFC_MBOX_OPCODE_MQ_CREATE             0x15
+#define LPFC_MBOX_OPCODE_GET_CNTL_ATTRIBUTES   0x20
+#define LPFC_MBOX_OPCODE_NOP                   0x21
+#define LPFC_MBOX_OPCODE_MQ_DESTROY            0x35
+#define LPFC_MBOX_OPCODE_CQ_DESTROY            0x36
+#define LPFC_MBOX_OPCODE_EQ_DESTROY            0x37
+#define LPFC_MBOX_OPCODE_FUNCTION_RESET                0x3D
+
+/* FCoE Opcodes */
+#define LPFC_MBOX_OPCODE_FCOE_WQ_CREATE                        0x01
+#define LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY               0x02
+#define LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES           0x03
+#define LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES         0x04
+#define LPFC_MBOX_OPCODE_FCOE_RQ_CREATE                        0x05
+#define LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY               0x06
+#define LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE           0x08
+#define LPFC_MBOX_OPCODE_FCOE_ADD_FCF                  0x09
+#define LPFC_MBOX_OPCODE_FCOE_DELETE_FCF               0x0A
+#define LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE                0x0B
+
+/* Mailbox command structures */
+struct eq_context {
+       uint32_t word0;
+#define lpfc_eq_context_size_SHIFT     31
+#define lpfc_eq_context_size_MASK      0x00000001
+#define lpfc_eq_context_size_WORD      word0
+#define LPFC_EQE_SIZE_4                        0x0
+#define LPFC_EQE_SIZE_16               0x1
+#define lpfc_eq_context_valid_SHIFT    29
+#define lpfc_eq_context_valid_MASK     0x00000001
+#define lpfc_eq_context_valid_WORD     word0
+       uint32_t word1;
+#define lpfc_eq_context_count_SHIFT    26
+#define lpfc_eq_context_count_MASK     0x00000003
+#define lpfc_eq_context_count_WORD     word1
+#define LPFC_EQ_CNT_256                0x0
+#define LPFC_EQ_CNT_512                0x1
+#define LPFC_EQ_CNT_1024       0x2
+#define LPFC_EQ_CNT_2048       0x3
+#define LPFC_EQ_CNT_4096       0x4
+       uint32_t word2;
+#define lpfc_eq_context_delay_multi_SHIFT      13
+#define lpfc_eq_context_delay_multi_MASK       0x000003FF
+#define lpfc_eq_context_delay_multi_WORD       word2
+       uint32_t reserved3;
+};
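+
+/*
+ * Illustrative sketch: the EQ count field is an enumeration rather than a
+ * raw entry count, so a translation step is needed.  Hypothetical helper:
+ */
+static inline uint32_t
+lpfc_sketch_eq_cnt_encode(uint32_t entries)
+{
+        switch (entries) {
+        case 256:  return LPFC_EQ_CNT_256;
+        case 512:  return LPFC_EQ_CNT_512;
+        case 1024: return LPFC_EQ_CNT_1024;
+        case 2048: return LPFC_EQ_CNT_2048;
+        case 4096: return LPFC_EQ_CNT_4096;
+        default:   return LPFC_EQ_CNT_1024;     /* arbitrary fallback */
+        }
+}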
+
+struct sgl_page_pairs {
+       uint32_t sgl_pg0_addr_lo;
+       uint32_t sgl_pg0_addr_hi;
+       uint32_t sgl_pg1_addr_lo;
+       uint32_t sgl_pg1_addr_hi;
+};
+
+struct lpfc_mbx_post_sgl_pages {
+       struct mbox_header header;
+       uint32_t word0;
+#define lpfc_post_sgl_pages_xri_SHIFT  0
+#define lpfc_post_sgl_pages_xri_MASK   0x0000FFFF
+#define lpfc_post_sgl_pages_xri_WORD   word0
+#define lpfc_post_sgl_pages_xricnt_SHIFT       16
+#define lpfc_post_sgl_pages_xricnt_MASK        0x0000FFFF
+#define lpfc_post_sgl_pages_xricnt_WORD        word0
+       struct sgl_page_pairs  sgl_pg_pairs[1];
+};
+
+/* word0 of page-1 struct shares the same SHIFT/MASK/WORD defines as above */
+struct lpfc_mbx_post_uembed_sgl_page1 {
+       union  lpfc_sli4_cfg_shdr cfg_shdr;
+       uint32_t word0;
+       struct sgl_page_pairs sgl_pg_pairs;
+};
+
+struct lpfc_mbx_sge {
+       uint32_t pa_lo;
+       uint32_t pa_hi;
+       uint32_t length;
+};
+
+struct lpfc_mbx_nembed_cmd {
+       struct lpfc_sli4_cfg_mhdr cfg_mhdr;
+#define LPFC_SLI4_MBX_SGE_MAX_PAGES    19
+       struct lpfc_mbx_sge sge[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_nembed_sge_virt {
+       void *addr[LPFC_SLI4_MBX_SGE_MAX_PAGES];
+};
+
+struct lpfc_mbx_eq_create {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_eq_create_num_pages_SHIFT     0
+#define lpfc_mbx_eq_create_num_pages_MASK      0x0000FFFF
+#define lpfc_mbx_eq_create_num_pages_WORD      word0
+                       struct eq_context context;
+                       struct dma_address page[LPFC_MAX_EQ_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_eq_create_q_id_SHIFT  0
+#define lpfc_mbx_eq_create_q_id_MASK   0x0000FFFF
+#define lpfc_mbx_eq_create_q_id_WORD   word0
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_eq_destroy {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_eq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_eq_destroy_q_id_MASK  0x0000FFFF
+#define lpfc_mbx_eq_destroy_q_id_WORD  word0
+               } request;
+               struct {
+                       uint32_t word0;
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_nop {
+       struct mbox_header header;
+       uint32_t context[2];
+};
+
+struct cq_context {
+       uint32_t word0;
+#define lpfc_cq_context_event_SHIFT    31
+#define lpfc_cq_context_event_MASK     0x00000001
+#define lpfc_cq_context_event_WORD     word0
+#define lpfc_cq_context_valid_SHIFT    29
+#define lpfc_cq_context_valid_MASK     0x00000001
+#define lpfc_cq_context_valid_WORD     word0
+#define lpfc_cq_context_count_SHIFT    27
+#define lpfc_cq_context_count_MASK     0x00000003
+#define lpfc_cq_context_count_WORD     word0
+#define LPFC_CQ_CNT_256                0x0
+#define LPFC_CQ_CNT_512                0x1
+#define LPFC_CQ_CNT_1024       0x2
+       uint32_t word1;
+#define lpfc_cq_eq_id_SHIFT            22
+#define lpfc_cq_eq_id_MASK             0x000000FF
+#define lpfc_cq_eq_id_WORD             word1
+       uint32_t reserved0;
+       uint32_t reserved1;
+};
+
+struct lpfc_mbx_cq_create {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_cq_create_num_pages_SHIFT     0
+#define lpfc_mbx_cq_create_num_pages_MASK      0x0000FFFF
+#define lpfc_mbx_cq_create_num_pages_WORD      word0
+                       struct cq_context context;
+                       struct dma_address page[LPFC_MAX_CQ_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_cq_create_q_id_SHIFT  0
+#define lpfc_mbx_cq_create_q_id_MASK   0x0000FFFF
+#define lpfc_mbx_cq_create_q_id_WORD   word0
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_cq_destroy {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_cq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_cq_destroy_q_id_MASK  0x0000FFFF
+#define lpfc_mbx_cq_destroy_q_id_WORD  word0
+               } request;
+               struct {
+                       uint32_t word0;
+               } response;
+       } u;
+};
+
+struct wq_context {
+       uint32_t reserved0;
+       uint32_t reserved1;
+       uint32_t reserved2;
+       uint32_t reserved3;
+};
+
+struct lpfc_mbx_wq_create {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_wq_create_num_pages_SHIFT     0
+#define lpfc_mbx_wq_create_num_pages_MASK      0x0000FFFF
+#define lpfc_mbx_wq_create_num_pages_WORD      word0
+#define lpfc_mbx_wq_create_cq_id_SHIFT         16
+#define lpfc_mbx_wq_create_cq_id_MASK          0x0000FFFF
+#define lpfc_mbx_wq_create_cq_id_WORD          word0
+                       struct dma_address page[LPFC_MAX_WQ_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_wq_create_q_id_SHIFT  0
+#define lpfc_mbx_wq_create_q_id_MASK   0x0000FFFF
+#define lpfc_mbx_wq_create_q_id_WORD   word0
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_wq_destroy {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_wq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_wq_destroy_q_id_MASK  0x0000FFFF
+#define lpfc_mbx_wq_destroy_q_id_WORD  word0
+               } request;
+               struct {
+                       uint32_t word0;
+               } response;
+       } u;
+};
+
+#define LPFC_HDR_BUF_SIZE 128
+#define LPFC_DATA_BUF_SIZE 4096
+struct rq_context {
+       uint32_t word0;
+#define lpfc_rq_context_rq_size_SHIFT  16
+#define lpfc_rq_context_rq_size_MASK   0x0000000F
+#define lpfc_rq_context_rq_size_WORD   word0
+#define LPFC_RQ_RING_SIZE_512          9       /* 512 entries */
+#define LPFC_RQ_RING_SIZE_1024         10      /* 1024 entries */
+#define LPFC_RQ_RING_SIZE_2048         11      /* 2048 entries */
+#define LPFC_RQ_RING_SIZE_4096         12      /* 4096 entries */
+       uint32_t reserved1;
+       uint32_t word2;
+#define lpfc_rq_context_cq_id_SHIFT    16
+#define lpfc_rq_context_cq_id_MASK     0x000003FF
+#define lpfc_rq_context_cq_id_WORD     word2
+#define lpfc_rq_context_buf_size_SHIFT 0
+#define lpfc_rq_context_buf_size_MASK  0x0000FFFF
+#define lpfc_rq_context_buf_size_WORD  word2
+       uint32_t reserved3;
+};
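+
+/*
+ * Illustrative sketch: unlike the EQ/CQ count enums, the RQ size field is
+ * simply log2 of the entry count (9 -> 512 ... 12 -> 4096), so ilog2()
+ * from <linux/log2.h> produces the encoding directly.  The function name
+ * is hypothetical.
+ */
+static inline void
+lpfc_sketch_rq_ctx_init(struct rq_context *ctx, uint32_t entries,
+                        uint16_t cq_id)
+{
+        bf_set(lpfc_rq_context_rq_size, ctx, ilog2(entries));
+        bf_set(lpfc_rq_context_cq_id, ctx, cq_id);
+        bf_set(lpfc_rq_context_buf_size, ctx, LPFC_DATA_BUF_SIZE);
+}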
+
+struct lpfc_mbx_rq_create {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_rq_create_num_pages_SHIFT     0
+#define lpfc_mbx_rq_create_num_pages_MASK      0x0000FFFF
+#define lpfc_mbx_rq_create_num_pages_WORD      word0
+                       struct rq_context context;
+                       struct dma_address page[LPFC_MAX_WQ_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_rq_create_q_id_SHIFT  0
+#define lpfc_mbx_rq_create_q_id_MASK   0x0000FFFF
+#define lpfc_mbx_rq_create_q_id_WORD   word0
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_rq_destroy {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_rq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_rq_destroy_q_id_MASK  0x0000FFFF
+#define lpfc_mbx_rq_destroy_q_id_WORD  word0
+               } request;
+               struct {
+                       uint32_t word0;
+               } response;
+       } u;
+};
+
+struct mq_context {
+       uint32_t word0;
+#define lpfc_mq_context_cq_id_SHIFT    22
+#define lpfc_mq_context_cq_id_MASK     0x000003FF
+#define lpfc_mq_context_cq_id_WORD     word0
+#define lpfc_mq_context_count_SHIFT    16
+#define lpfc_mq_context_count_MASK     0x0000000F
+#define lpfc_mq_context_count_WORD     word0
+#define LPFC_MQ_CNT_16         0x5
+#define LPFC_MQ_CNT_32         0x6
+#define LPFC_MQ_CNT_64         0x7
+#define LPFC_MQ_CNT_128                0x8
+       uint32_t word1;
+#define lpfc_mq_context_valid_SHIFT    31
+#define lpfc_mq_context_valid_MASK     0x00000001
+#define lpfc_mq_context_valid_WORD     word1
+       uint32_t reserved2;
+       uint32_t reserved3;
+};
+
+struct lpfc_mbx_mq_create {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_mq_create_num_pages_SHIFT     0
+#define lpfc_mbx_mq_create_num_pages_MASK      0x0000FFFF
+#define lpfc_mbx_mq_create_num_pages_WORD      word0
+                       struct mq_context context;
+                       struct dma_address page[LPFC_MAX_MQ_PAGE];
+               } request;
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_mq_create_q_id_SHIFT  0
+#define lpfc_mbx_mq_create_q_id_MASK   0x0000FFFF
+#define lpfc_mbx_mq_create_q_id_WORD   word0
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_mq_destroy {
+       struct mbox_header header;
+       union {
+               struct {
+                       uint32_t word0;
+#define lpfc_mbx_mq_destroy_q_id_SHIFT 0
+#define lpfc_mbx_mq_destroy_q_id_MASK  0x0000FFFF
+#define lpfc_mbx_mq_destroy_q_id_WORD  word0
+               } request;
+               struct {
+                       uint32_t word0;
+               } response;
+       } u;
+};
+
+struct lpfc_mbx_post_hdr_tmpl {
+       struct mbox_header header;
+       uint32_t word10;
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_SHIFT  0
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_MASK   0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_rpi_offset_WORD   word10
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_SHIFT   16
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_MASK    0x0000FFFF
+#define lpfc_mbx_post_hdr_tmpl_page_cnt_WORD    word10
+       uint32_t rpi_paddr_lo;
+       uint32_t rpi_paddr_hi;
+};
+
+struct sli4_sge {      /* SLI-4 */
+       uint32_t addr_hi;
+       uint32_t addr_lo;
+
+       uint32_t word2;
+#define lpfc_sli4_sge_offset_SHIFT     0 /* Offset of buffer - not used */
+#define lpfc_sli4_sge_offset_MASK      0x00FFFFFF
+#define lpfc_sli4_sge_offset_WORD      word2
+#define lpfc_sli4_sge_last_SHIFT       31 /* Set on the last SGE
+                                               of an SGL */
+#define lpfc_sli4_sge_last_MASK                0x00000001
+#define lpfc_sli4_sge_last_WORD                word2
+       uint32_t word3;
+#define lpfc_sli4_sge_len_SHIFT                0
+#define lpfc_sli4_sge_len_MASK         0x0001FFFF
+#define lpfc_sli4_sge_len_WORD         word3
+};
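+
+/*
+ * Illustrative sketch: populating a single SLI-4 SGE.  The "last" bit is
+ * set only on the final element of the list; the function name and the
+ * putPaddr*() splitters (from lpfc_hw.h) are assumptions here.
+ */
+static inline void
+lpfc_sketch_fill_sge(struct sli4_sge *sge, dma_addr_t paddr, uint32_t len,
+                     int is_last)
+{
+        sge->addr_hi = putPaddrHigh(paddr);
+        sge->addr_lo = putPaddrLow(paddr);
+        sge->word2 = 0;
+        bf_set(lpfc_sli4_sge_last, sge, is_last ? 1 : 0);
+        sge->word3 = 0;
+        bf_set(lpfc_sli4_sge_len, sge, len);
+}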
+
+struct fcf_record {
+       uint32_t max_rcv_size;
+       uint32_t fka_adv_period;
+       uint32_t fip_priority;
+       uint32_t word3;
+#define lpfc_fcf_record_mac_0_SHIFT            0
+#define lpfc_fcf_record_mac_0_MASK             0x000000FF
+#define lpfc_fcf_record_mac_0_WORD             word3
+#define lpfc_fcf_record_mac_1_SHIFT            8
+#define lpfc_fcf_record_mac_1_MASK             0x000000FF
+#define lpfc_fcf_record_mac_1_WORD             word3
+#define lpfc_fcf_record_mac_2_SHIFT            16
+#define lpfc_fcf_record_mac_2_MASK             0x000000FF
+#define lpfc_fcf_record_mac_2_WORD             word3
+#define lpfc_fcf_record_mac_3_SHIFT            24
+#define lpfc_fcf_record_mac_3_MASK             0x000000FF
+#define lpfc_fcf_record_mac_3_WORD             word3
+       uint32_t word4;
+#define lpfc_fcf_record_mac_4_SHIFT            0
+#define lpfc_fcf_record_mac_4_MASK             0x000000FF
+#define lpfc_fcf_record_mac_4_WORD             word4
+#define lpfc_fcf_record_mac_5_SHIFT            8
+#define lpfc_fcf_record_mac_5_MASK             0x000000FF
+#define lpfc_fcf_record_mac_5_WORD             word4
+#define lpfc_fcf_record_fcf_avail_SHIFT                16
+#define lpfc_fcf_record_fcf_avail_MASK         0x000000FF
+#define lpfc_fcf_record_fcf_avail_WORD         word4
+#define lpfc_fcf_record_mac_addr_prov_SHIFT    24
+#define lpfc_fcf_record_mac_addr_prov_MASK     0x000000FF
+#define lpfc_fcf_record_mac_addr_prov_WORD     word4
+#define LPFC_FCF_FPMA          1       /* Fabric Provided MAC Address */
+#define LPFC_FCF_SPMA          2       /* Server Provided MAC Address */
+       uint32_t word5;
+#define lpfc_fcf_record_fab_name_0_SHIFT       0
+#define lpfc_fcf_record_fab_name_0_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_0_WORD                word5
+#define lpfc_fcf_record_fab_name_1_SHIFT       8
+#define lpfc_fcf_record_fab_name_1_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_1_WORD                word5
+#define lpfc_fcf_record_fab_name_2_SHIFT       16
+#define lpfc_fcf_record_fab_name_2_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_2_WORD                word5
+#define lpfc_fcf_record_fab_name_3_SHIFT       24
+#define lpfc_fcf_record_fab_name_3_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_3_WORD                word5
+       uint32_t word6;
+#define lpfc_fcf_record_fab_name_4_SHIFT       0
+#define lpfc_fcf_record_fab_name_4_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_4_WORD                word6
+#define lpfc_fcf_record_fab_name_5_SHIFT       8
+#define lpfc_fcf_record_fab_name_5_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_5_WORD                word6
+#define lpfc_fcf_record_fab_name_6_SHIFT       16
+#define lpfc_fcf_record_fab_name_6_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_6_WORD                word6
+#define lpfc_fcf_record_fab_name_7_SHIFT       24
+#define lpfc_fcf_record_fab_name_7_MASK                0x000000FF
+#define lpfc_fcf_record_fab_name_7_WORD                word6
+       uint32_t word7;
+#define lpfc_fcf_record_fc_map_0_SHIFT         0
+#define lpfc_fcf_record_fc_map_0_MASK          0x000000FF
+#define lpfc_fcf_record_fc_map_0_WORD          word7
+#define lpfc_fcf_record_fc_map_1_SHIFT         8
+#define lpfc_fcf_record_fc_map_1_MASK          0x000000FF
+#define lpfc_fcf_record_fc_map_1_WORD          word7
+#define lpfc_fcf_record_fc_map_2_SHIFT         16
+#define lpfc_fcf_record_fc_map_2_MASK          0x000000FF
+#define lpfc_fcf_record_fc_map_2_WORD          word7
+#define lpfc_fcf_record_fcf_valid_SHIFT                24
+#define lpfc_fcf_record_fcf_valid_MASK         0x000000FF
+#define lpfc_fcf_record_fcf_valid_WORD         word7
+       uint32_t word8;
+#define lpfc_fcf_record_fcf_index_SHIFT                0
+#define lpfc_fcf_record_fcf_index_MASK         0x0000FFFF
+#define lpfc_fcf_record_fcf_index_WORD         word8
+#define lpfc_fcf_record_fcf_state_SHIFT                16
+#define lpfc_fcf_record_fcf_state_MASK         0x0000FFFF
+#define lpfc_fcf_record_fcf_state_WORD         word8
+       uint8_t vlan_bitmap[512];
+};
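+
+/*
+ * Illustrative sketch: the FCF MAC address is stored one byte per bitfield
+ * across word3/word4, so reassembling it takes six bf_get() calls.  The
+ * function name is hypothetical.
+ */
+static inline void
+lpfc_sketch_fcf_mac(struct fcf_record *rec, uint8_t *mac)
+{
+        mac[0] = bf_get(lpfc_fcf_record_mac_0, rec);
+        mac[1] = bf_get(lpfc_fcf_record_mac_1, rec);
+        mac[2] = bf_get(lpfc_fcf_record_mac_2, rec);
+        mac[3] = bf_get(lpfc_fcf_record_mac_3, rec);
+        mac[4] = bf_get(lpfc_fcf_record_mac_4, rec);
+        mac[5] = bf_get(lpfc_fcf_record_mac_5, rec);
+}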
+
+struct lpfc_mbx_read_fcf_tbl {
+       union lpfc_sli4_cfg_shdr cfg_shdr;
+       union {
+               struct {
+                       uint32_t word10;
+#define lpfc_mbx_read_fcf_tbl_indx_SHIFT       0
+#define lpfc_mbx_read_fcf_tbl_indx_MASK                0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_indx_WORD                word10
+               } request;
+               struct {
+                       uint32_t eventag;
+               } response;
+       } u;
+       uint32_t word11;
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_SHIFT  0
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_MASK   0x0000FFFF
+#define lpfc_mbx_read_fcf_tbl_nxt_vindx_WORD   word11
+};
+
+struct lpfc_mbx_add_fcf_tbl_entry {
+       union lpfc_sli4_cfg_shdr cfg_shdr;
+       uint32_t word10;
+#define lpfc_mbx_add_fcf_tbl_fcfi_SHIFT        0
+#define lpfc_mbx_add_fcf_tbl_fcfi_MASK         0x0000FFFF
+#define lpfc_mbx_add_fcf_tbl_fcfi_WORD         word10
+       struct lpfc_mbx_sge fcf_sge;
+};
+
+struct lpfc_mbx_del_fcf_tbl_entry {
+       struct mbox_header header;
+       uint32_t word10;
+#define lpfc_mbx_del_fcf_tbl_count_SHIFT       0
+#define lpfc_mbx_del_fcf_tbl_count_MASK                0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_count_WORD                word10
+#define lpfc_mbx_del_fcf_tbl_index_SHIFT       16
+#define lpfc_mbx_del_fcf_tbl_index_MASK                0x0000FFFF
+#define lpfc_mbx_del_fcf_tbl_index_WORD                word10
+};
+
+/* Status field for embedded SLI_CONFIG mailbox command */
+#define STATUS_SUCCESS                                 0x0
+#define STATUS_FAILED                                  0x1
+#define STATUS_ILLEGAL_REQUEST                         0x2
+#define STATUS_ILLEGAL_FIELD                           0x3
+#define STATUS_INSUFFICIENT_BUFFER                     0x4
+#define STATUS_UNAUTHORIZED_REQUEST                    0x5
+#define STATUS_FLASHROM_SAVE_FAILED                    0x17
+#define STATUS_FLASHROM_RESTORE_FAILED                 0x18
+#define STATUS_ICCBINDEX_ALLOC_FAILED                  0x1a
+#define STATUS_IOCTLHANDLE_ALLOC_FAILED                0x1b
+#define STATUS_INVALID_PHY_ADDR_FROM_OSM               0x1c
+#define STATUS_INVALID_PHY_ADDR_LEN_FROM_OSM           0x1d
+#define STATUS_ASSERT_FAILED                           0x1e
+#define STATUS_INVALID_SESSION                         0x1f
+#define STATUS_INVALID_CONNECTION                      0x20
+#define STATUS_BTL_PATH_EXCEEDS_OSM_LIMIT              0x21
+#define STATUS_BTL_NO_FREE_SLOT_PATH                   0x24
+#define STATUS_BTL_NO_FREE_SLOT_TGTID                  0x25
+#define STATUS_OSM_DEVSLOT_NOT_FOUND                   0x26
+#define STATUS_FLASHROM_READ_FAILED                    0x27
+#define STATUS_POLL_IOCTL_TIMEOUT                      0x28
+#define STATUS_ERROR_ACITMAIN                          0x2a
+#define STATUS_REBOOT_REQUIRED                         0x2c
+#define STATUS_FCF_IN_USE                              0x3a
+
+struct lpfc_mbx_sli4_config {
+       struct mbox_header header;
+};
+
+struct lpfc_mbx_init_vfi {
+       uint32_t word1;
+#define lpfc_init_vfi_vr_SHIFT         31
+#define lpfc_init_vfi_vr_MASK          0x00000001
+#define lpfc_init_vfi_vr_WORD          word1
+#define lpfc_init_vfi_vt_SHIFT         30
+#define lpfc_init_vfi_vt_MASK          0x00000001
+#define lpfc_init_vfi_vt_WORD          word1
+#define lpfc_init_vfi_vf_SHIFT         29
+#define lpfc_init_vfi_vf_MASK          0x00000001
+#define lpfc_init_vfi_vf_WORD          word1
+#define lpfc_init_vfi_vfi_SHIFT                0
+#define lpfc_init_vfi_vfi_MASK         0x0000FFFF
+#define lpfc_init_vfi_vfi_WORD         word1
+       uint32_t word2;
+#define lpfc_init_vfi_fcfi_SHIFT       0
+#define lpfc_init_vfi_fcfi_MASK                0x0000FFFF
+#define lpfc_init_vfi_fcfi_WORD                word2
+       uint32_t word3;
+#define lpfc_init_vfi_pri_SHIFT                13
+#define lpfc_init_vfi_pri_MASK         0x00000007
+#define lpfc_init_vfi_pri_WORD         word3
+#define lpfc_init_vfi_vf_id_SHIFT      1
+#define lpfc_init_vfi_vf_id_MASK       0x00000FFF
+#define lpfc_init_vfi_vf_id_WORD       word3
+       uint32_t word4;
+#define lpfc_init_vfi_hop_count_SHIFT  24
+#define lpfc_init_vfi_hop_count_MASK   0x000000FF
+#define lpfc_init_vfi_hop_count_WORD   word4
+};
+
+struct lpfc_mbx_reg_vfi {
+       uint32_t word1;
+#define lpfc_reg_vfi_vp_SHIFT          28
+#define lpfc_reg_vfi_vp_MASK           0x00000001
+#define lpfc_reg_vfi_vp_WORD           word1
+#define lpfc_reg_vfi_vfi_SHIFT         0
+#define lpfc_reg_vfi_vfi_MASK          0x0000FFFF
+#define lpfc_reg_vfi_vfi_WORD          word1
+       uint32_t word2;
+#define lpfc_reg_vfi_vpi_SHIFT         16
+#define lpfc_reg_vfi_vpi_MASK          0x0000FFFF
+#define lpfc_reg_vfi_vpi_WORD          word2
+#define lpfc_reg_vfi_fcfi_SHIFT                0
+#define lpfc_reg_vfi_fcfi_MASK         0x0000FFFF
+#define lpfc_reg_vfi_fcfi_WORD         word2
+       uint32_t word3_rsvd;
+       uint32_t word4_rsvd;
+       struct ulp_bde64 bde;
+       uint32_t word8_rsvd;
+       uint32_t word9_rsvd;
+       uint32_t word10;
+#define lpfc_reg_vfi_nport_id_SHIFT            0
+#define lpfc_reg_vfi_nport_id_MASK             0x00FFFFFF
+#define lpfc_reg_vfi_nport_id_WORD             word10
+};
+
+struct lpfc_mbx_init_vpi {
+       uint32_t word1;
+#define lpfc_init_vpi_vfi_SHIFT                16
+#define lpfc_init_vpi_vfi_MASK         0x0000FFFF
+#define lpfc_init_vpi_vfi_WORD         word1
+#define lpfc_init_vpi_vpi_SHIFT                0
+#define lpfc_init_vpi_vpi_MASK         0x0000FFFF
+#define lpfc_init_vpi_vpi_WORD         word1
+};
+
+struct lpfc_mbx_read_vpi {
+       uint32_t word1_rsvd;
+       uint32_t word2;
+#define lpfc_mbx_read_vpi_vnportid_SHIFT       0
+#define lpfc_mbx_read_vpi_vnportid_MASK                0x00FFFFFF
+#define lpfc_mbx_read_vpi_vnportid_WORD                word2
+       uint32_t word3_rsvd;
+       uint32_t word4;
+#define lpfc_mbx_read_vpi_acq_alpa_SHIFT       0
+#define lpfc_mbx_read_vpi_acq_alpa_MASK                0x000000FF
+#define lpfc_mbx_read_vpi_acq_alpa_WORD                word4
+#define lpfc_mbx_read_vpi_pb_SHIFT             15
+#define lpfc_mbx_read_vpi_pb_MASK              0x00000001
+#define lpfc_mbx_read_vpi_pb_WORD              word4
+#define lpfc_mbx_read_vpi_spec_alpa_SHIFT      16
+#define lpfc_mbx_read_vpi_spec_alpa_MASK       0x000000FF
+#define lpfc_mbx_read_vpi_spec_alpa_WORD       word4
+#define lpfc_mbx_read_vpi_ns_SHIFT             30
+#define lpfc_mbx_read_vpi_ns_MASK              0x00000001
+#define lpfc_mbx_read_vpi_ns_WORD              word4
+#define lpfc_mbx_read_vpi_hl_SHIFT             31
+#define lpfc_mbx_read_vpi_hl_MASK              0x00000001
+#define lpfc_mbx_read_vpi_hl_WORD              word4
+       uint32_t word5_rsvd;
+       uint32_t word6;
+#define lpfc_mbx_read_vpi_vpi_SHIFT            0
+#define lpfc_mbx_read_vpi_vpi_MASK             0x0000FFFF
+#define lpfc_mbx_read_vpi_vpi_WORD             word6
+       uint32_t word7;
+#define lpfc_mbx_read_vpi_mac_0_SHIFT          0
+#define lpfc_mbx_read_vpi_mac_0_MASK           0x000000FF
+#define lpfc_mbx_read_vpi_mac_0_WORD           word7
+#define lpfc_mbx_read_vpi_mac_1_SHIFT          8
+#define lpfc_mbx_read_vpi_mac_1_MASK           0x000000FF
+#define lpfc_mbx_read_vpi_mac_1_WORD           word7
+#define lpfc_mbx_read_vpi_mac_2_SHIFT          16
+#define lpfc_mbx_read_vpi_mac_2_MASK           0x000000FF
+#define lpfc_mbx_read_vpi_mac_2_WORD           word7
+#define lpfc_mbx_read_vpi_mac_3_SHIFT          24
+#define lpfc_mbx_read_vpi_mac_3_MASK           0x000000FF
+#define lpfc_mbx_read_vpi_mac_3_WORD           word7
+       uint32_t word8;
+#define lpfc_mbx_read_vpi_mac_4_SHIFT          0
+#define lpfc_mbx_read_vpi_mac_4_MASK           0x000000FF
+#define lpfc_mbx_read_vpi_mac_4_WORD           word8
+#define lpfc_mbx_read_vpi_mac_5_SHIFT          8
+#define lpfc_mbx_read_vpi_mac_5_MASK           0x000000FF
+#define lpfc_mbx_read_vpi_mac_5_WORD           word8
+#define lpfc_mbx_read_vpi_vlan_tag_SHIFT       16
+#define lpfc_mbx_read_vpi_vlan_tag_MASK                0x00000FFF
+#define lpfc_mbx_read_vpi_vlan_tag_WORD                word8
+#define lpfc_mbx_read_vpi_vv_SHIFT             28
+#define lpfc_mbx_read_vpi_vv_MASK              0x00000001
+#define lpfc_mbx_read_vpi_vv_WORD              word8
+};
+
+struct lpfc_mbx_unreg_vfi {
+       uint32_t word1_rsvd;
+       uint32_t word2;
+#define lpfc_unreg_vfi_vfi_SHIFT       0
+#define lpfc_unreg_vfi_vfi_MASK                0x0000FFFF
+#define lpfc_unreg_vfi_vfi_WORD                word2
+};
+
+struct lpfc_mbx_resume_rpi {
+       uint32_t word1;
+#define lpfc_resume_rpi_rpi_SHIFT      0
+#define lpfc_resume_rpi_rpi_MASK       0x0000FFFF
+#define lpfc_resume_rpi_rpi_WORD       word1
+       uint32_t event_tag;
+       uint32_t word3_rsvd;
+       uint32_t word4_rsvd;
+       uint32_t word5_rsvd;
+       uint32_t word6;
+#define lpfc_resume_rpi_vpi_SHIFT      0
+#define lpfc_resume_rpi_vpi_MASK       0x0000FFFF
+#define lpfc_resume_rpi_vpi_WORD       word6
+#define lpfc_resume_rpi_vfi_SHIFT      16
+#define lpfc_resume_rpi_vfi_MASK       0x0000FFFF
+#define lpfc_resume_rpi_vfi_WORD       word6
+};
+
+#define REG_FCF_INVALID_QID    0xFFFF
+struct lpfc_mbx_reg_fcfi {
+       uint32_t word1;
+#define lpfc_reg_fcfi_info_index_SHIFT 0
+#define lpfc_reg_fcfi_info_index_MASK  0x0000FFFF
+#define lpfc_reg_fcfi_info_index_WORD  word1
+#define lpfc_reg_fcfi_fcfi_SHIFT       16
+#define lpfc_reg_fcfi_fcfi_MASK                0x0000FFFF
+#define lpfc_reg_fcfi_fcfi_WORD                word1
+       uint32_t word2;
+#define lpfc_reg_fcfi_rq_id1_SHIFT     0
+#define lpfc_reg_fcfi_rq_id1_MASK      0x0000FFFF
+#define lpfc_reg_fcfi_rq_id1_WORD      word2
+#define lpfc_reg_fcfi_rq_id0_SHIFT     16
+#define lpfc_reg_fcfi_rq_id0_MASK      0x0000FFFF
+#define lpfc_reg_fcfi_rq_id0_WORD      word2
+       uint32_t word3;
+#define lpfc_reg_fcfi_rq_id3_SHIFT     0
+#define lpfc_reg_fcfi_rq_id3_MASK      0x0000FFFF
+#define lpfc_reg_fcfi_rq_id3_WORD      word3
+#define lpfc_reg_fcfi_rq_id2_SHIFT     16
+#define lpfc_reg_fcfi_rq_id2_MASK      0x0000FFFF
+#define lpfc_reg_fcfi_rq_id2_WORD      word3
+       uint32_t word4;
+#define lpfc_reg_fcfi_type_match0_SHIFT        24
+#define lpfc_reg_fcfi_type_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match0_WORD word4
+#define lpfc_reg_fcfi_type_mask0_SHIFT 16
+#define lpfc_reg_fcfi_type_mask0_MASK  0x000000FF
+#define lpfc_reg_fcfi_type_mask0_WORD  word4
+#define lpfc_reg_fcfi_rctl_match0_SHIFT        8
+#define lpfc_reg_fcfi_rctl_match0_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match0_WORD word4
+#define lpfc_reg_fcfi_rctl_mask0_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask0_MASK  0x000000FF
+#define lpfc_reg_fcfi_rctl_mask0_WORD  word4
+       uint32_t word5;
+#define lpfc_reg_fcfi_type_match1_SHIFT        24
+#define lpfc_reg_fcfi_type_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match1_WORD word5
+#define lpfc_reg_fcfi_type_mask1_SHIFT 16
+#define lpfc_reg_fcfi_type_mask1_MASK  0x000000FF
+#define lpfc_reg_fcfi_type_mask1_WORD  word5
+#define lpfc_reg_fcfi_rctl_match1_SHIFT        8
+#define lpfc_reg_fcfi_rctl_match1_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match1_WORD word5
+#define lpfc_reg_fcfi_rctl_mask1_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask1_MASK  0x000000FF
+#define lpfc_reg_fcfi_rctl_mask1_WORD  word5
+       uint32_t word6;
+#define lpfc_reg_fcfi_type_match2_SHIFT        24
+#define lpfc_reg_fcfi_type_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match2_WORD word6
+#define lpfc_reg_fcfi_type_mask2_SHIFT 16
+#define lpfc_reg_fcfi_type_mask2_MASK  0x000000FF
+#define lpfc_reg_fcfi_type_mask2_WORD  word6
+#define lpfc_reg_fcfi_rctl_match2_SHIFT        8
+#define lpfc_reg_fcfi_rctl_match2_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match2_WORD word6
+#define lpfc_reg_fcfi_rctl_mask2_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask2_MASK  0x000000FF
+#define lpfc_reg_fcfi_rctl_mask2_WORD  word6
+       uint32_t word7;
+#define lpfc_reg_fcfi_type_match3_SHIFT        24
+#define lpfc_reg_fcfi_type_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_type_match3_WORD word7
+#define lpfc_reg_fcfi_type_mask3_SHIFT 16
+#define lpfc_reg_fcfi_type_mask3_MASK  0x000000FF
+#define lpfc_reg_fcfi_type_mask3_WORD  word7
+#define lpfc_reg_fcfi_rctl_match3_SHIFT        8
+#define lpfc_reg_fcfi_rctl_match3_MASK 0x000000FF
+#define lpfc_reg_fcfi_rctl_match3_WORD word7
+#define lpfc_reg_fcfi_rctl_mask3_SHIFT 0
+#define lpfc_reg_fcfi_rctl_mask3_MASK  0x000000FF
+#define lpfc_reg_fcfi_rctl_mask3_WORD  word7
+       uint32_t word8;
+#define lpfc_reg_fcfi_mam_SHIFT                13
+#define lpfc_reg_fcfi_mam_MASK         0x00000003
+#define lpfc_reg_fcfi_mam_WORD         word8
+#define LPFC_MAM_BOTH          0       /* Both SPMA and FPMA */
+#define LPFC_MAM_SPMA          1       /* Server Provided MAC Address */
+#define LPFC_MAM_FPMA          2       /* Fabric Provided MAC Address */
+#define lpfc_reg_fcfi_vv_SHIFT         12
+#define lpfc_reg_fcfi_vv_MASK          0x00000001
+#define lpfc_reg_fcfi_vv_WORD          word8
+#define lpfc_reg_fcfi_vlan_tag_SHIFT   0
+#define lpfc_reg_fcfi_vlan_tag_MASK    0x00000FFF
+#define lpfc_reg_fcfi_vlan_tag_WORD    word8
+};
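+
+/*
+ * Illustrative sketch: unused RQ slots in REG_FCFI are filled with
+ * REG_FCF_INVALID_QID.  A minimal registration that binds only RQ 0 might
+ * look as follows (function name hypothetical):
+ */
+static inline void
+lpfc_sketch_reg_fcfi(struct lpfc_mbx_reg_fcfi *reg, uint16_t fcf_index,
+                     uint16_t rq_id0)
+{
+        bf_set(lpfc_reg_fcfi_info_index, reg, fcf_index);
+        bf_set(lpfc_reg_fcfi_rq_id0, reg, rq_id0);
+        bf_set(lpfc_reg_fcfi_rq_id1, reg, REG_FCF_INVALID_QID);
+        bf_set(lpfc_reg_fcfi_rq_id2, reg, REG_FCF_INVALID_QID);
+        bf_set(lpfc_reg_fcfi_rq_id3, reg, REG_FCF_INVALID_QID);
+}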
+
+struct lpfc_mbx_unreg_fcfi {
+       uint32_t word1_rsvd;
+       uint32_t word2;
+#define lpfc_unreg_fcfi_SHIFT          0
+#define lpfc_unreg_fcfi_MASK           0x0000FFFF
+#define lpfc_unreg_fcfi_WORD           word2
+};
+
+struct lpfc_mbx_read_rev {
+       uint32_t word1;
+#define lpfc_mbx_rd_rev_sli_lvl_SHIFT                  16
+#define lpfc_mbx_rd_rev_sli_lvl_MASK                   0x0000000F
+#define lpfc_mbx_rd_rev_sli_lvl_WORD                   word1
+#define lpfc_mbx_rd_rev_fcoe_SHIFT             20
+#define lpfc_mbx_rd_rev_fcoe_MASK              0x00000001
+#define lpfc_mbx_rd_rev_fcoe_WORD              word1
+#define lpfc_mbx_rd_rev_vpd_SHIFT              29
+#define lpfc_mbx_rd_rev_vpd_MASK               0x00000001
+#define lpfc_mbx_rd_rev_vpd_WORD               word1
+       uint32_t first_hw_rev;
+       uint32_t second_hw_rev;
+       uint32_t word4_rsvd;
+       uint32_t third_hw_rev;
+       uint32_t word6;
+#define lpfc_mbx_rd_rev_fcph_low_SHIFT         0
+#define lpfc_mbx_rd_rev_fcph_low_MASK          0x000000FF
+#define lpfc_mbx_rd_rev_fcph_low_WORD          word6
+#define lpfc_mbx_rd_rev_fcph_high_SHIFT                8
+#define lpfc_mbx_rd_rev_fcph_high_MASK         0x000000FF
+#define lpfc_mbx_rd_rev_fcph_high_WORD         word6
+#define lpfc_mbx_rd_rev_ftr_lvl_low_SHIFT      16
+#define lpfc_mbx_rd_rev_ftr_lvl_low_MASK       0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_low_WORD       word6
+#define lpfc_mbx_rd_rev_ftr_lvl_high_SHIFT     24
+#define lpfc_mbx_rd_rev_ftr_lvl_high_MASK      0x000000FF
+#define lpfc_mbx_rd_rev_ftr_lvl_high_WORD      word6
+       uint32_t word7_rsvd;
+       uint32_t fw_id_rev;
+       uint8_t  fw_name[16];
+       uint32_t ulp_fw_id_rev;
+       uint8_t  ulp_fw_name[16];
+       uint32_t word18_47_rsvd[30];
+       uint32_t word48;
+#define lpfc_mbx_rd_rev_avail_len_SHIFT                0
+#define lpfc_mbx_rd_rev_avail_len_MASK         0x00FFFFFF
+#define lpfc_mbx_rd_rev_avail_len_WORD         word48
+       uint32_t vpd_paddr_low;
+       uint32_t vpd_paddr_high;
+       uint32_t avail_vpd_len;
+       uint32_t rsvd_52_63[12];
+};
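
Once READ_REV completes, word1 carries the SLI level, an FCoE-mode flag and a VPD-available flag at fixed positions; the rest of the payload is plain words and byte arrays. A hedged sketch of decoding it with the bf_get() helper shown earlier (illustrative function name):

static void example_parse_read_rev(struct lpfc_mbx_read_rev *rd_rev)
{
	uint32_t sli_lvl = bf_get(lpfc_mbx_rd_rev_sli_lvl, rd_rev);
	uint32_t fcoe    = bf_get(lpfc_mbx_rd_rev_fcoe, rd_rev);
	uint32_t vpd     = bf_get(lpfc_mbx_rd_rev_vpd, rd_rev);

	/* fw_name is a fixed 16-byte field, not necessarily NUL-terminated */
	printk(KERN_INFO "SLI%u%s fw %.16s%s\n", sli_lvl,
	       fcoe ? " (FCoE)" : "", rd_rev->fw_name,
	       vpd ? ", VPD available" : "");
}
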
+
+struct lpfc_mbx_read_config {
+       uint32_t word1;
+#define lpfc_mbx_rd_conf_max_bbc_SHIFT         0
+#define lpfc_mbx_rd_conf_max_bbc_MASK          0x000000FF
+#define lpfc_mbx_rd_conf_max_bbc_WORD          word1
+#define lpfc_mbx_rd_conf_init_bbc_SHIFT                8
+#define lpfc_mbx_rd_conf_init_bbc_MASK         0x000000FF
+#define lpfc_mbx_rd_conf_init_bbc_WORD         word1
+       uint32_t word2;
+#define lpfc_mbx_rd_conf_nport_did_SHIFT       0
+#define lpfc_mbx_rd_conf_nport_did_MASK                0x00FFFFFF
+#define lpfc_mbx_rd_conf_nport_did_WORD                word2
+#define lpfc_mbx_rd_conf_topology_SHIFT                24
+#define lpfc_mbx_rd_conf_topology_MASK         0x000000FF
+#define lpfc_mbx_rd_conf_topology_WORD         word2
+       uint32_t word3;
+#define lpfc_mbx_rd_conf_ao_SHIFT              0
+#define lpfc_mbx_rd_conf_ao_MASK               0x00000001
+#define lpfc_mbx_rd_conf_ao_WORD               word3
+#define lpfc_mbx_rd_conf_bb_scn_SHIFT          8
+#define lpfc_mbx_rd_conf_bb_scn_MASK           0x0000000F
+#define lpfc_mbx_rd_conf_bb_scn_WORD           word3
+#define lpfc_mbx_rd_conf_cbb_scn_SHIFT         12
+#define lpfc_mbx_rd_conf_cbb_scn_MASK          0x0000000F
+#define lpfc_mbx_rd_conf_cbb_scn_WORD          word3
+#define lpfc_mbx_rd_conf_mc_SHIFT              29
+#define lpfc_mbx_rd_conf_mc_MASK               0x00000001
+#define lpfc_mbx_rd_conf_mc_WORD               word3
+       uint32_t word4;
+#define lpfc_mbx_rd_conf_e_d_tov_SHIFT         0
+#define lpfc_mbx_rd_conf_e_d_tov_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_e_d_tov_WORD          word4
+       uint32_t word5;
+#define lpfc_mbx_rd_conf_lp_tov_SHIFT          0
+#define lpfc_mbx_rd_conf_lp_tov_MASK           0x0000FFFF
+#define lpfc_mbx_rd_conf_lp_tov_WORD           word5
+       uint32_t word6;
+#define lpfc_mbx_rd_conf_r_a_tov_SHIFT         0
+#define lpfc_mbx_rd_conf_r_a_tov_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_r_a_tov_WORD          word6
+       uint32_t word7;
+#define lpfc_mbx_rd_conf_r_t_tov_SHIFT         0
+#define lpfc_mbx_rd_conf_r_t_tov_MASK          0x000000FF
+#define lpfc_mbx_rd_conf_r_t_tov_WORD          word7
+       uint32_t word8;
+#define lpfc_mbx_rd_conf_al_tov_SHIFT          0
+#define lpfc_mbx_rd_conf_al_tov_MASK           0x0000000F
+#define lpfc_mbx_rd_conf_al_tov_WORD           word8
+       uint32_t word9;
+#define lpfc_mbx_rd_conf_lmt_SHIFT             0
+#define lpfc_mbx_rd_conf_lmt_MASK              0x0000FFFF
+#define lpfc_mbx_rd_conf_lmt_WORD              word9
+       uint32_t word10;
+#define lpfc_mbx_rd_conf_max_alpa_SHIFT                0
+#define lpfc_mbx_rd_conf_max_alpa_MASK         0x000000FF
+#define lpfc_mbx_rd_conf_max_alpa_WORD         word10
+       uint32_t word11_rsvd;
+       uint32_t word12;
+#define lpfc_mbx_rd_conf_xri_base_SHIFT                0
+#define lpfc_mbx_rd_conf_xri_base_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_base_WORD         word12
+#define lpfc_mbx_rd_conf_xri_count_SHIFT       16
+#define lpfc_mbx_rd_conf_xri_count_MASK                0x0000FFFF
+#define lpfc_mbx_rd_conf_xri_count_WORD                word12
+       uint32_t word13;
+#define lpfc_mbx_rd_conf_rpi_base_SHIFT                0
+#define lpfc_mbx_rd_conf_rpi_base_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_base_WORD         word13
+#define lpfc_mbx_rd_conf_rpi_count_SHIFT       16
+#define lpfc_mbx_rd_conf_rpi_count_MASK                0x0000FFFF
+#define lpfc_mbx_rd_conf_rpi_count_WORD                word13
+       uint32_t word14;
+#define lpfc_mbx_rd_conf_vpi_base_SHIFT                0
+#define lpfc_mbx_rd_conf_vpi_base_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_base_WORD         word14
+#define lpfc_mbx_rd_conf_vpi_count_SHIFT       16
+#define lpfc_mbx_rd_conf_vpi_count_MASK                0x0000FFFF
+#define lpfc_mbx_rd_conf_vpi_count_WORD                word14
+       uint32_t word15;
+#define lpfc_mbx_rd_conf_vfi_base_SHIFT         0
+#define lpfc_mbx_rd_conf_vfi_base_MASK          0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_base_WORD          word15
+#define lpfc_mbx_rd_conf_vfi_count_SHIFT        16
+#define lpfc_mbx_rd_conf_vfi_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_vfi_count_WORD         word15
+       uint32_t word16;
+#define lpfc_mbx_rd_conf_fcfi_base_SHIFT       0
+#define lpfc_mbx_rd_conf_fcfi_base_MASK                0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_base_WORD                word16
+#define lpfc_mbx_rd_conf_fcfi_count_SHIFT      16
+#define lpfc_mbx_rd_conf_fcfi_count_MASK       0x0000FFFF
+#define lpfc_mbx_rd_conf_fcfi_count_WORD       word16
+       uint32_t word17;
+#define lpfc_mbx_rd_conf_rq_count_SHIFT                0
+#define lpfc_mbx_rd_conf_rq_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_rq_count_WORD         word17
+#define lpfc_mbx_rd_conf_eq_count_SHIFT                16
+#define lpfc_mbx_rd_conf_eq_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_eq_count_WORD         word17
+       uint32_t word18;
+#define lpfc_mbx_rd_conf_wq_count_SHIFT                0
+#define lpfc_mbx_rd_conf_wq_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_wq_count_WORD         word18
+#define lpfc_mbx_rd_conf_cq_count_SHIFT                16
+#define lpfc_mbx_rd_conf_cq_count_MASK         0x0000FFFF
+#define lpfc_mbx_rd_conf_cq_count_WORD         word18
+};
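
READ_CONFIG hands back each resource pool as a base/count pair packed two to a word; the driver sizes its XRI, RPI, VPI, VFI and FCFI pools, and its queue counts, from these. A hedged decoding sketch (illustrative name):

static void example_parse_read_config(struct lpfc_mbx_read_config *rd_config)
{
	uint16_t xri_base  = bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
	uint16_t xri_count = bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
	uint16_t rpi_base  = bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
	uint16_t rpi_count = bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
	/* vpi/vfi/fcfi and the eq/cq/wq/rq counts follow the same pattern */

	printk(KERN_INFO "XRI base %u count %u, RPI base %u count %u\n",
	       xri_base, xri_count, rpi_base, rpi_count);
}
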
+
+struct lpfc_mbx_request_features {
+       uint32_t word1;
+#define lpfc_mbx_rq_ftr_qry_SHIFT              0
+#define lpfc_mbx_rq_ftr_qry_MASK               0x00000001
+#define lpfc_mbx_rq_ftr_qry_WORD               word1
+       uint32_t word2;
+#define lpfc_mbx_rq_ftr_rq_iaab_SHIFT          0
+#define lpfc_mbx_rq_ftr_rq_iaab_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rq_iaab_WORD           word2
+#define lpfc_mbx_rq_ftr_rq_npiv_SHIFT          1
+#define lpfc_mbx_rq_ftr_rq_npiv_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rq_npiv_WORD           word2
+#define lpfc_mbx_rq_ftr_rq_dif_SHIFT           2
+#define lpfc_mbx_rq_ftr_rq_dif_MASK            0x00000001
+#define lpfc_mbx_rq_ftr_rq_dif_WORD            word2
+#define lpfc_mbx_rq_ftr_rq_vf_SHIFT            3
+#define lpfc_mbx_rq_ftr_rq_vf_MASK             0x00000001
+#define lpfc_mbx_rq_ftr_rq_vf_WORD             word2
+#define lpfc_mbx_rq_ftr_rq_fcpi_SHIFT          4
+#define lpfc_mbx_rq_ftr_rq_fcpi_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpi_WORD           word2
+#define lpfc_mbx_rq_ftr_rq_fcpt_SHIFT          5
+#define lpfc_mbx_rq_ftr_rq_fcpt_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpt_WORD           word2
+#define lpfc_mbx_rq_ftr_rq_fcpc_SHIFT          6
+#define lpfc_mbx_rq_ftr_rq_fcpc_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rq_fcpc_WORD           word2
+#define lpfc_mbx_rq_ftr_rq_ifip_SHIFT          7
+#define lpfc_mbx_rq_ftr_rq_ifip_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rq_ifip_WORD           word2
+       uint32_t word3;
+#define lpfc_mbx_rq_ftr_rsp_iaab_SHIFT         0
+#define lpfc_mbx_rq_ftr_rsp_iaab_MASK          0x00000001
+#define lpfc_mbx_rq_ftr_rsp_iaab_WORD          word3
+#define lpfc_mbx_rq_ftr_rsp_npiv_SHIFT         1
+#define lpfc_mbx_rq_ftr_rsp_npiv_MASK          0x00000001
+#define lpfc_mbx_rq_ftr_rsp_npiv_WORD          word3
+#define lpfc_mbx_rq_ftr_rsp_dif_SHIFT          2
+#define lpfc_mbx_rq_ftr_rsp_dif_MASK           0x00000001
+#define lpfc_mbx_rq_ftr_rsp_dif_WORD           word3
+#define lpfc_mbx_rq_ftr_rsp_vf_SHIFT           3
+#define lpfc_mbx_rq_ftr_rsp_vf_MASK            0x00000001
+#define lpfc_mbx_rq_ftr_rsp_vf_WORD            word3
+#define lpfc_mbx_rq_ftr_rsp_fcpi_SHIFT         4
+#define lpfc_mbx_rq_ftr_rsp_fcpi_MASK          0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpi_WORD          word3
+#define lpfc_mbx_rq_ftr_rsp_fcpt_SHIFT         5
+#define lpfc_mbx_rq_ftr_rsp_fcpt_MASK          0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpt_WORD          word3
+#define lpfc_mbx_rq_ftr_rsp_fcpc_SHIFT         6
+#define lpfc_mbx_rq_ftr_rsp_fcpc_MASK          0x00000001
+#define lpfc_mbx_rq_ftr_rsp_fcpc_WORD          word3
+#define lpfc_mbx_rq_ftr_rsp_ifip_SHIFT         7
+#define lpfc_mbx_rq_ftr_rsp_ifip_MASK          0x00000001
+#define lpfc_mbx_rq_ftr_rsp_ifip_WORD          word3
+};
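
REQUEST_FEATURES is symmetric: the host raises the rq_* bits in word2 it wants (with qry in word1 selecting a query-only pass), and the port answers in the matching rsp_* bits of word3. A hedged sketch of asking for FCP initiator mode; issuing the mailbox command itself is elided:

/* Illustrative: returns 0 only if the port granted FCP initiator mode */
static int example_request_fcpi(struct lpfc_mbx_request_features *ftrs)
{
	bf_set(lpfc_mbx_rq_ftr_rq_fcpi, ftrs, 1);
	/* ... submit the mailbox command and wait for completion ... */
	return bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, ftrs) ? 0 : -1;
}
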
+
+/* Mailbox Completion Queue Error Messages */
+#define MB_CQE_STATUS_SUCCESS                  0x0
+#define MB_CQE_STATUS_INSUFFICIENT_PRIVILEGES  0x1
+#define MB_CQE_STATUS_INVALID_PARAMETER                0x2
+#define MB_CQE_STATUS_INSUFFICIENT_RESOURCES   0x3
+#define MB_CQE_STATUS_QUEUE_FLUSHING           0x4
+#define MB_CQE_STATUS_DMA_FAILED               0x5
+
+/* mailbox queue entry structure */
+struct lpfc_mqe {
+       uint32_t word0;
+#define lpfc_mqe_status_SHIFT          16
+#define lpfc_mqe_status_MASK           0x0000FFFF
+#define lpfc_mqe_status_WORD           word0
+#define lpfc_mqe_command_SHIFT         8
+#define lpfc_mqe_command_MASK          0x000000FF
+#define lpfc_mqe_command_WORD          word0
+       union {
+               uint32_t mb_words[LPFC_SLI4_MB_WORD_COUNT - 1];
+               /* sli4 mailbox commands */
+               struct lpfc_mbx_sli4_config sli4_config;
+               struct lpfc_mbx_init_vfi init_vfi;
+               struct lpfc_mbx_reg_vfi reg_vfi;
+               struct lpfc_mbx_reg_vfi unreg_vfi;
+               struct lpfc_mbx_init_vpi init_vpi;
+               struct lpfc_mbx_resume_rpi resume_rpi;
+               struct lpfc_mbx_read_fcf_tbl read_fcf_tbl;
+               struct lpfc_mbx_add_fcf_tbl_entry add_fcf_entry;
+               struct lpfc_mbx_del_fcf_tbl_entry del_fcf_entry;
+               struct lpfc_mbx_reg_fcfi reg_fcfi;
+               struct lpfc_mbx_unreg_fcfi unreg_fcfi;
+               struct lpfc_mbx_mq_create mq_create;
+               struct lpfc_mbx_eq_create eq_create;
+               struct lpfc_mbx_cq_create cq_create;
+               struct lpfc_mbx_wq_create wq_create;
+               struct lpfc_mbx_rq_create rq_create;
+               struct lpfc_mbx_mq_destroy mq_destroy;
+               struct lpfc_mbx_eq_destroy eq_destroy;
+               struct lpfc_mbx_cq_destroy cq_destroy;
+               struct lpfc_mbx_wq_destroy wq_destroy;
+               struct lpfc_mbx_rq_destroy rq_destroy;
+               struct lpfc_mbx_post_sgl_pages post_sgl_pages;
+               struct lpfc_mbx_nembed_cmd nembed_cmd;
+               struct lpfc_mbx_read_rev read_rev;
+               struct lpfc_mbx_read_vpi read_vpi;
+               struct lpfc_mbx_read_config rd_config;
+               struct lpfc_mbx_request_features req_ftrs;
+               struct lpfc_mbx_post_hdr_tmpl hdr_tmpl;
+               struct lpfc_mbx_nop nop;
+       } un;
+};
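
Since every command shares word0's status/command layout, a completion path can decode word0 first and only then pick the matching member of the un union. A hedged dispatch sketch, assuming the CQE status codes above are what lpfc_mqe_status reports:

static void example_mqe_complete(struct lpfc_mqe *mqe)
{
	uint8_t  cmd    = bf_get(lpfc_mqe_command, mqe);
	uint16_t status = bf_get(lpfc_mqe_status, mqe);

	if (status != MB_CQE_STATUS_SUCCESS)
		return;		/* one error path covers every command */

	/* dispatch on the opcode; each case uses its own union view,
	 * e.g. &mqe->un.rd_config for READ_CONFIG */
	(void)cmd;
}
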
+
+struct lpfc_mcqe {
+       uint32_t word0;
+#define lpfc_mcqe_status_SHIFT         0
+#define lpfc_mcqe_status_MASK          0x0000FFFF
+#define lpfc_mcqe_status_WORD          word0
+#define lpfc_mcqe_ext_status_SHIFT     16
+#define lpfc_mcqe_ext_status_MASK      0x0000FFFF
+#define lpfc_mcqe_ext_status_WORD      word0
+       uint32_t mcqe_tag0;
+       uint32_t mcqe_tag1;
+       uint32_t trailer;
+#define lpfc_trailer_valid_SHIFT       31
+#define lpfc_trailer_valid_MASK                0x00000001
+#define lpfc_trailer_valid_WORD                trailer
+#define lpfc_trailer_async_SHIFT       30
+#define lpfc_trailer_async_MASK                0x00000001
+#define lpfc_trailer_async_WORD                trailer
+#define lpfc_trailer_hpi_SHIFT         29
+#define lpfc_trailer_hpi_MASK          0x00000001
+#define lpfc_trailer_hpi_WORD          trailer
+#define lpfc_trailer_completed_SHIFT   28
+#define lpfc_trailer_completed_MASK    0x00000001
+#define lpfc_trailer_completed_WORD    trailer
+#define lpfc_trailer_consumed_SHIFT    27
+#define lpfc_trailer_consumed_MASK     0x00000001
+#define lpfc_trailer_consumed_WORD     trailer
+#define lpfc_trailer_type_SHIFT                16
+#define lpfc_trailer_type_MASK         0x000000FF
+#define lpfc_trailer_type_WORD         trailer
+#define lpfc_trailer_code_SHIFT                8
+#define lpfc_trailer_code_MASK         0x000000FF
+#define lpfc_trailer_code_WORD         trailer
+#define LPFC_TRAILER_CODE_LINK 0x1
+#define LPFC_TRAILER_CODE_FCOE 0x2
+#define LPFC_TRAILER_CODE_DCBX 0x3
+};
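
The trailer lets one CQE format serve both mailbox completions and async events: valid gates consumption, async distinguishes the two uses, and for async entries code selects the link, FCoE or DCBX handler. A hedged sketch (illustrative name):

static void example_handle_mcqe(struct lpfc_mcqe *mcqe)
{
	if (!bf_get(lpfc_trailer_valid, mcqe))
		return;			/* stale entry, not ours yet */

	if (!bf_get(lpfc_trailer_async, mcqe)) {
		/* ordinary mailbox completion: check lpfc_mcqe_status */
		return;
	}

	switch (bf_get(lpfc_trailer_code, mcqe)) {
	case LPFC_TRAILER_CODE_LINK:
	case LPFC_TRAILER_CODE_FCOE:
	case LPFC_TRAILER_CODE_DCBX:
		/* route to the matching lpfc_acqe_* handler */
		break;
	}
}
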
+
+struct lpfc_acqe_link {
+       uint32_t word0;
+#define lpfc_acqe_link_speed_SHIFT             24
+#define lpfc_acqe_link_speed_MASK              0x000000FF
+#define lpfc_acqe_link_speed_WORD              word0
+#define LPFC_ASYNC_LINK_SPEED_ZERO             0x0
+#define LPFC_ASYNC_LINK_SPEED_10MBPS           0x1
+#define LPFC_ASYNC_LINK_SPEED_100MBPS          0x2
+#define LPFC_ASYNC_LINK_SPEED_1GBPS            0x3
+#define LPFC_ASYNC_LINK_SPEED_10GBPS           0x4
+#define lpfc_acqe_link_duplex_SHIFT            16
+#define lpfc_acqe_link_duplex_MASK             0x000000FF
+#define lpfc_acqe_link_duplex_WORD             word0
+#define LPFC_ASYNC_LINK_DUPLEX_NONE            0x0
+#define LPFC_ASYNC_LINK_DUPLEX_HALF            0x1
+#define LPFC_ASYNC_LINK_DUPLEX_FULL            0x2
+#define lpfc_acqe_link_status_SHIFT            8
+#define lpfc_acqe_link_status_MASK             0x000000FF
+#define lpfc_acqe_link_status_WORD             word0
+#define LPFC_ASYNC_LINK_STATUS_DOWN            0x0
+#define LPFC_ASYNC_LINK_STATUS_UP              0x1
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN    0x2
+#define LPFC_ASYNC_LINK_STATUS_LOGICAL_UP      0x3
+#define lpfc_acqe_link_physical_SHIFT          0
+#define lpfc_acqe_link_physical_MASK           0x000000FF
+#define lpfc_acqe_link_physical_WORD           word0
+#define LPFC_ASYNC_LINK_PORT_A                 0x0
+#define LPFC_ASYNC_LINK_PORT_B                 0x1
+       uint32_t word1;
+#define lpfc_acqe_link_fault_SHIFT     0
+#define lpfc_acqe_link_fault_MASK      0x000000FF
+#define lpfc_acqe_link_fault_WORD      word1
+#define LPFC_ASYNC_LINK_FAULT_NONE     0x0
+#define LPFC_ASYNC_LINK_FAULT_LOCAL    0x1
+#define LPFC_ASYNC_LINK_FAULT_REMOTE   0x2
+       uint32_t event_tag;
+       uint32_t trailer;
+};
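
Word0 of a link ACQE packs speed, duplex, status and the reporting port; word1 adds a fault code. A hedged predicate built on the same accessors (illustrative name):

/* Illustrative: true only for a fault-free link-up event */
static int example_link_usable(struct lpfc_acqe_link *acqe)
{
	return bf_get(lpfc_acqe_link_status, acqe) ==
		       LPFC_ASYNC_LINK_STATUS_UP &&
	       bf_get(lpfc_acqe_link_fault, acqe) ==
		       LPFC_ASYNC_LINK_FAULT_NONE;
}
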
+
+struct lpfc_acqe_fcoe {
+       uint32_t fcf_index;
+       uint32_t word1;
+#define lpfc_acqe_fcoe_fcf_count_SHIFT         0
+#define lpfc_acqe_fcoe_fcf_count_MASK          0x0000FFFF
+#define lpfc_acqe_fcoe_fcf_count_WORD          word1
+#define lpfc_acqe_fcoe_event_type_SHIFT                16
+#define lpfc_acqe_fcoe_event_type_MASK         0x0000FFFF
+#define lpfc_acqe_fcoe_event_type_WORD         word1
+#define LPFC_FCOE_EVENT_TYPE_NEW_FCF           0x1
+#define LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL    0x2
+#define LPFC_FCOE_EVENT_TYPE_FCF_DEAD          0x3
+       uint32_t event_tag;
+       uint32_t trailer;
+};
+
+struct lpfc_acqe_dcbx {
+       uint32_t tlv_ttl;
+       uint32_t reserved;
+       uint32_t event_tag;
+       uint32_t trailer;
+};
+
+/*
+ * Define the bootstrap mailbox (bmbx) region used to communicate
+ * mailbox commands between the host and port. The mailbox consists
+ * of a payload area of 256 bytes and a completion queue entry of
+ * 16 bytes.
+ */
+struct lpfc_bmbx_create {
+       struct lpfc_mqe mqe;
+       struct lpfc_mcqe mcqe;
+};
+
+/* align SGL addr on a size boundary - adjust address up */
+#define SGL_ALIGN_SZ 64
+#define SGL_PAGE_SIZE 4096
+#define NO_XRI ((uint16_t)-1)
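
The "adjust address up" the comment above calls for is the usual power-of-two rounding; the kernel's ALIGN() macro does the same thing. A minimal sketch:

/* Round addr up to the next SGL_ALIGN_SZ boundary; valid because
 * SGL_ALIGN_SZ is a power of two. */
static inline uintptr_t example_sgl_align_up(uintptr_t addr)
{
	return (addr + SGL_ALIGN_SZ - 1) & ~((uintptr_t)SGL_ALIGN_SZ - 1);
}
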
+struct wqe_common {
+       uint32_t word6;
+#define wqe_xri_SHIFT         0
+#define wqe_xri_MASK          0x0000FFFF
+#define wqe_xri_WORD          word6
+#define wqe_ctxt_tag_SHIFT    16
+#define wqe_ctxt_tag_MASK     0x0000FFFF
+#define wqe_ctxt_tag_WORD     word6
+       uint32_t word7;
+#define wqe_ct_SHIFT          2
+#define wqe_ct_MASK           0x00000003
+#define wqe_ct_WORD           word7
+#define wqe_status_SHIFT      4
+#define wqe_status_MASK       0x0000000f
+#define wqe_status_WORD       word7
+#define wqe_cmnd_SHIFT        8
+#define wqe_cmnd_MASK         0x000000ff
+#define wqe_cmnd_WORD         word7
+#define wqe_class_SHIFT       16
+#define wqe_class_MASK        0x00000007
+#define wqe_class_WORD        word7
+#define wqe_pu_SHIFT          20
+#define wqe_pu_MASK           0x00000003
+#define wqe_pu_WORD           word7
+#define wqe_erp_SHIFT         22
+#define wqe_erp_MASK          0x00000001
+#define wqe_erp_WORD          word7
+#define wqe_lnk_SHIFT         23
+#define wqe_lnk_MASK          0x00000001
+#define wqe_lnk_WORD          word7
+#define wqe_tmo_SHIFT         24
+#define wqe_tmo_MASK          0x000000ff
+#define wqe_tmo_WORD          word7
+       uint32_t abort_tag; /* word 8 in WQE */
+       uint32_t word9;
+#define wqe_reqtag_SHIFT      0
+#define wqe_reqtag_MASK       0x0000FFFF
+#define wqe_reqtag_WORD       word9
+#define wqe_rcvoxid_SHIFT     16
+#define wqe_rcvoxid_MASK       0x0000FFFF
+#define wqe_rcvoxid_WORD       word9
+       uint32_t word10;
+#define wqe_pri_SHIFT         16
+#define wqe_pri_MASK          0x00000007
+#define wqe_pri_WORD          word10
+#define wqe_pv_SHIFT          19
+#define wqe_pv_MASK           0x00000001
+#define wqe_pv_WORD           word10
+#define wqe_xc_SHIFT          21
+#define wqe_xc_MASK           0x00000001
+#define wqe_xc_WORD           word10
+#define wqe_ccpe_SHIFT        23
+#define wqe_ccpe_MASK         0x00000001
+#define wqe_ccpe_WORD         word10
+#define wqe_ccp_SHIFT         24
+#define wqe_ccp_MASK         0x000000ff
+#define wqe_ccp_WORD         word10
+       uint32_t word11;
+#define wqe_cmd_type_SHIFT  0
+#define wqe_cmd_type_MASK   0x0000000f
+#define wqe_cmd_type_WORD   word11
+#define wqe_wqec_SHIFT      7
+#define wqe_wqec_MASK       0x00000001
+#define wqe_wqec_WORD       word11
+#define wqe_cqid_SHIFT      16
+#define wqe_cqid_MASK       0x000003ff
+#define wqe_cqid_WORD       word11
+};
+
+struct wqe_did {
+       uint32_t word5;
+#define wqe_els_did_SHIFT         0
+#define wqe_els_did_MASK          0x00FFFFFF
+#define wqe_els_did_WORD          word5
+#define wqe_xmit_bls_ar_SHIFT         30
+#define wqe_xmit_bls_ar_MASK          0x00000001
+#define wqe_xmit_bls_ar_WORD          word5
+#define wqe_xmit_bls_xo_SHIFT         31
+#define wqe_xmit_bls_xo_MASK          0x00000001
+#define wqe_xmit_bls_xo_WORD          word5
+};
+
+struct els_request64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t payload_len;
+       uint32_t word4;
+#define els_req64_sid_SHIFT         0
+#define els_req64_sid_MASK          0x00FFFFFF
+#define els_req64_sid_WORD          word4
+#define els_req64_sp_SHIFT          24
+#define els_req64_sp_MASK           0x00000001
+#define els_req64_sp_WORD           word4
+#define els_req64_vf_SHIFT          25
+#define els_req64_vf_MASK           0x00000001
+#define els_req64_vf_WORD           word4
+       struct wqe_did  wqe_dest;
+       struct wqe_common wqe_com; /* words 6-11 */
+       uint32_t word12;
+#define els_req64_vfid_SHIFT        1
+#define els_req64_vfid_MASK         0x00000FFF
+#define els_req64_vfid_WORD         word12
+#define els_req64_pri_SHIFT         13
+#define els_req64_pri_MASK          0x00000007
+#define els_req64_pri_WORD          word12
+       uint32_t word13;
+#define els_req64_hopcnt_SHIFT      24
+#define els_req64_hopcnt_MASK       0x000000ff
+#define els_req64_hopcnt_WORD       word13
+       uint32_t reserved[2];
+};
+
+struct xmit_els_rsp64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t rsvd3;
+       uint32_t rsvd4;
+       struct wqe_did  wqe_dest;
+       struct wqe_common wqe_com; /* words 6-11 */
+       uint32_t rsvd_12_15[4];
+};
+
+struct xmit_bls_rsp64_wqe {
+       uint32_t payload0;
+       uint32_t word1;
+#define xmit_bls_rsp64_rxid_SHIFT  0
+#define xmit_bls_rsp64_rxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_rxid_WORD   word1
+#define xmit_bls_rsp64_oxid_SHIFT  16
+#define xmit_bls_rsp64_oxid_MASK   0x0000ffff
+#define xmit_bls_rsp64_oxid_WORD   word1
+       uint32_t word2;
+#define xmit_bls_rsp64_seqcntlo_SHIFT  0
+#define xmit_bls_rsp64_seqcntlo_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcntlo_WORD   word2
+#define xmit_bls_rsp64_seqcnthi_SHIFT  16
+#define xmit_bls_rsp64_seqcnthi_MASK   0x0000ffff
+#define xmit_bls_rsp64_seqcnthi_WORD   word2
+       uint32_t rsrvd3;
+       uint32_t rsrvd4;
+       struct wqe_did  wqe_dest;
+       struct wqe_common wqe_com; /* words 6-11 */
+       uint32_t rsvd_12_15[4];
+};
+
+struct wqe_rctl_dfctl {
+       uint32_t word5;
+#define wqe_si_SHIFT 2
+#define wqe_si_MASK  0x00000001
+#define wqe_si_WORD  word5
+#define wqe_la_SHIFT 3
+#define wqe_la_MASK  0x00000001
+#define wqe_la_WORD  word5
+#define wqe_ls_SHIFT 7
+#define wqe_ls_MASK  0x00000001
+#define wqe_ls_WORD  word5
+#define wqe_dfctl_SHIFT 8
+#define wqe_dfctl_MASK  0x000000ff
+#define wqe_dfctl_WORD  word5
+#define wqe_type_SHIFT 16
+#define wqe_type_MASK  0x000000ff
+#define wqe_type_WORD  word5
+#define wqe_rctl_SHIFT 24
+#define wqe_rctl_MASK  0x000000ff
+#define wqe_rctl_WORD  word5
+};
+
+struct xmit_seq64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t payload_offset;
+       uint32_t relative_offset;
+       struct wqe_rctl_dfctl wge_ctl;
+       struct wqe_common wqe_com; /* words 6-11 */
+       /* Note: word10 different REVISIT */
+       uint32_t xmit_len;
+       uint32_t rsvd_12_15[3];
+};
+
+struct xmit_bcast64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t payload_len;
+       uint32_t rsvd4;
+       struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t rsvd_12_15[4];
+};
+
+struct gen_req64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t command_len;
+       uint32_t payload_len;
+       struct wqe_rctl_dfctl wge_ctl; /* word 5 */
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t rsvd_12_15[4];
+};
+
+struct create_xri_wqe {
+       uint32_t rsrvd[5];           /* words 0-4 */
+       struct wqe_did  wqe_dest;  /* word 5 */
+       struct wqe_common wqe_com; /* words 6-11 */
+       uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+#define T_REQUEST_TAG 3
+#define T_XRI_TAG 1
+
+struct abort_cmd_wqe {
+       uint32_t rsrvd[3];
+       uint32_t word3;
+#define        abort_cmd_ia_SHIFT  0
+#define        abort_cmd_ia_MASK  0x00000001
+#define        abort_cmd_ia_WORD  word3
+#define        abort_cmd_criteria_SHIFT  8
+#define        abort_cmd_criteria_MASK  0x000000ff
+#define        abort_cmd_criteria_WORD  word3
+       uint32_t rsrvd4;
+       uint32_t rsrvd5;
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iwrite64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t payload_len;
+       uint32_t total_xfer_len;
+       uint32_t initial_xfer_len;
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_iread64_wqe {
+       struct ulp_bde64 bde;
+       uint32_t payload_len;          /* word 3 */
+       uint32_t total_xfer_len;       /* word 4 */
+       uint32_t rsrvd5;               /* word 5 */
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+struct fcp_icmnd64_wqe {
+       struct ulp_bde64 bde;    /* words 0-2 */
+       uint32_t rsrvd[3];             /* words 3-5 */
+       struct wqe_common wqe_com;     /* words 6-11 */
+       uint32_t rsvd_12_15[4];         /* word 12-15 */
+};
+
+union lpfc_wqe {
+       uint32_t words[16];
+       struct lpfc_wqe_generic generic;
+       struct fcp_icmnd64_wqe fcp_icmd;
+       struct fcp_iread64_wqe fcp_iread;
+       struct fcp_iwrite64_wqe fcp_iwrite;
+       struct abort_cmd_wqe abort_cmd;
+       struct create_xri_wqe create_xri;
+       struct xmit_bcast64_wqe xmit_bcast64;
+       struct xmit_seq64_wqe xmit_sequence;
+       struct xmit_bls_rsp64_wqe xmit_bls_rsp;
+       struct xmit_els_rsp64_wqe xmit_els_rsp;
+       struct els_request64_wqe els_req;
+       struct gen_req64_wqe gen_req;
+};
+
+#define FCP_COMMAND 0x0
+#define FCP_COMMAND_DATA_OUT 0x1
+#define ELS_COMMAND_NON_FIP 0xC
+#define ELS_COMMAND_FIP 0xD
+#define OTHER_COMMAND 0x8
+
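
Putting the pieces together, building a WQE follows from the union above: zero all 16 words, pick the view that matches the command, and set the shared fields through wqe_com and wqe_dest. A hedged sketch for an ELS request (illustrative function name; real submission paths also fill the BDE and lengths):

static void example_build_els_wqe(union lpfc_wqe *wqe, uint16_t xri,
				  uint16_t reqtag, uint32_t did)
{
	memset(wqe, 0, sizeof(*wqe));			/* all 16 words */
	bf_set(wqe_els_did, &wqe->els_req.wqe_dest, did);
	bf_set(wqe_xri, &wqe->els_req.wqe_com, xri);
	bf_set(wqe_reqtag, &wqe->els_req.wqe_com, reqtag);
	bf_set(wqe_cmd_type, &wqe->els_req.wqe_com, ELS_COMMAND_NON_FIP);
}
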
index 86d1bdcbf2d819003355d81c06318c451575d23d..2f5907f92eeaee7cac3079615ac035d67c27e1a5 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -51,9 +53,23 @@ char *_dump_buf_dif;
 unsigned long _dump_buf_dif_order;
 spinlock_t _dump_buf_lock;
 
-static int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 static void lpfc_get_hba_model_desc(struct lpfc_hba *, uint8_t *, uint8_t *);
 static int lpfc_post_rcv_buf(struct lpfc_hba *);
+static int lpfc_sli4_queue_create(struct lpfc_hba *);
+static void lpfc_sli4_queue_destroy(struct lpfc_hba *);
+static int lpfc_create_bootstrap_mbox(struct lpfc_hba *);
+static int lpfc_setup_endian_order(struct lpfc_hba *);
+static int lpfc_sli4_read_config(struct lpfc_hba *);
+static void lpfc_destroy_bootstrap_mbox(struct lpfc_hba *);
+static void lpfc_free_sgl_list(struct lpfc_hba *);
+static int lpfc_init_sgl_list(struct lpfc_hba *);
+static int lpfc_init_active_sgl_array(struct lpfc_hba *);
+static void lpfc_free_active_sgl(struct lpfc_hba *);
+static int lpfc_hba_down_post_s3(struct lpfc_hba *phba);
+static int lpfc_hba_down_post_s4(struct lpfc_hba *phba);
+static int lpfc_sli4_cq_event_pool_create(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *);
+static void lpfc_sli4_cq_event_release_all(struct lpfc_hba *);
 
 static struct scsi_transport_template *lpfc_transport_template = NULL;
 static struct scsi_transport_template *lpfc_vport_transport_template = NULL;
@@ -92,7 +108,7 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                return -ENOMEM;
        }
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
 
        if (lpfc_is_LC_HBA(phba->pcidev->device)) {
@@ -205,6 +221,11 @@ lpfc_config_port_prep(struct lpfc_hba *phba)
                                        mb->mbxCommand, mb->mbxStatus);
                        mb->un.varDmp.word_cnt = 0;
                }
+               /* dump mem may return a zero word count when finished, or
+                * when we got a mailbox error; either way we are done.
+                */
+               if (mb->un.varDmp.word_cnt == 0)
+                       break;
                if (mb->un.varDmp.word_cnt > DMP_VPD_SIZE - offset)
                        mb->un.varDmp.word_cnt = DMP_VPD_SIZE - offset;
                lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
@@ -233,7 +254,7 @@ out_free_mbox:
 static void
 lpfc_config_async_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
 {
-       if (pmboxq->mb.mbxStatus == MBX_SUCCESS)
+       if (pmboxq->u.mb.mbxStatus == MBX_SUCCESS)
                phba->temp_sensor_support = 1;
        else
                phba->temp_sensor_support = 0;
@@ -260,7 +281,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
        /* character array used for decoding dist type. */
        char dist_char[] = "nabx";
 
-       if (pmboxq->mb.mbxStatus != MBX_SUCCESS) {
+       if (pmboxq->u.mb.mbxStatus != MBX_SUCCESS) {
                mempool_free(pmboxq, phba->mbox_mem_pool);
                return;
        }
@@ -268,7 +289,7 @@ lpfc_dump_wakeup_param_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
        prg = (struct prog_id *) &prog_id_word;
 
        /* word 7 contain option rom version */
-       prog_id_word = pmboxq->mb.un.varWords[7];
+       prog_id_word = pmboxq->u.mb.un.varWords[7];
 
        /* Decode the Option rom version word to a readable string */
        if (prg->dist < 4)
@@ -325,7 +346,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                phba->link_state = LPFC_HBA_ERROR;
                return -ENOMEM;
        }
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
 
        /* Get login parameters for NID.  */
        lpfc_read_sparam(phba, pmb, 0);
@@ -364,6 +385,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
        /* Update the fc_host data structures with new wwn. */
        fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
        fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+       fc_host_max_npiv_vports(shost) = phba->max_vpi;
 
        /* If no serial number in VPD data, use low 6 bytes of WWNN */
        /* This should be consolidated into parse_vpd ? - mr */
@@ -460,17 +482,18 @@ lpfc_config_port_post(struct lpfc_hba *phba)
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
                                        "0352 Config MSI mailbox command "
                                        "failed, mbxCmd x%x, mbxStatus x%x\n",
-                                       pmb->mb.mbxCommand, pmb->mb.mbxStatus);
+                                       pmb->u.mb.mbxCommand,
+                                       pmb->u.mb.mbxStatus);
                        mempool_free(pmb, phba->mbox_mem_pool);
                        return -EIO;
                }
        }
 
+       spin_lock_irq(&phba->hbalock);
        /* Initialize ERATT handling flag */
        phba->hba_flag &= ~HBA_ERATT_HANDLED;
 
        /* Enable appropriate host interrupts */
-       spin_lock_irq(&phba->hbalock);
        status = readl(phba->HCregaddr);
        status |= HC_MBINT_ENA | HC_ERINT_ENA | HC_LAINT_ENA;
        if (psli->num_rings > 0)
@@ -571,16 +594,20 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
 {
        struct lpfc_vport **vports;
        int i;
-       /* Disable interrupts */
-       writel(0, phba->HCregaddr);
-       readl(phba->HCregaddr); /* flush */
+
+       if (phba->sli_rev <= LPFC_SLI_REV3) {
+               /* Disable interrupts */
+               writel(0, phba->HCregaddr);
+               readl(phba->HCregaddr); /* flush */
+       }
 
        if (phba->pport->load_flag & FC_UNLOADING)
                lpfc_cleanup_discovery_resources(phba->pport);
        else {
                vports = lpfc_create_vport_work_array(phba);
                if (vports != NULL)
-                       for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+                       for (i = 0; i <= phba->max_vports &&
+                               vports[i] != NULL; i++)
                                lpfc_cleanup_discovery_resources(vports[i]);
                lpfc_destroy_vport_work_array(phba, vports);
        }
@@ -588,7 +615,7 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_hba_down_post - Perform lpfc uninitialization after HBA reset
+ * lpfc_hba_down_post_s3 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
  *
  * This routine will do uninitialization after the HBA is reset when bringing
@@ -598,8 +625,8 @@ lpfc_hba_down_prep(struct lpfc_hba *phba)
  *   0 - success.
  *   Any other value - error.
  **/
-int
-lpfc_hba_down_post(struct lpfc_hba *phba)
+static int
+lpfc_hba_down_post_s3(struct lpfc_hba *phba)
 {
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
@@ -642,6 +669,77 @@ lpfc_hba_down_post(struct lpfc_hba *phba)
 
        return 0;
 }
+/**
+ * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will do uninitialization after the HBA is reset when bringing
+ * down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+static int
+lpfc_hba_down_post_s4(struct lpfc_hba *phba)
+{
+       struct lpfc_scsi_buf *psb, *psb_next;
+       LIST_HEAD(aborts);
+       int ret;
+       unsigned long iflag = 0;
+       ret = lpfc_hba_down_post_s3(phba);
+       if (ret)
+               return ret;
+       /* At this point the HBA is either reset or DOA. Either way,
+        * nothing should remain on lpfc_abts_els_sgl_list; it needs to be
+        * on the lpfc_sgl_list so that it can either be freed if the
+        * driver is unloading or reposted if the driver is restarting
+        * the port.
+        */
+       spin_lock_irq(&phba->hbalock);  /* required for lpfc_sgl_list and */
+                                       /* scsi_buf_list */
+       /* abts_sgl_list_lock required because worker thread uses this
+        * list.
+        */
+       spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+       list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
+                       &phba->sli4_hba.lpfc_sgl_list);
+       spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
+       /* abts_scsi_buf_list_lock required because worker thread uses this
+        * list.
+        */
+       spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+       list_splice_init(&phba->sli4_hba.lpfc_abts_scsi_buf_list,
+                       &aborts);
+       spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+       spin_unlock_irq(&phba->hbalock);
+
+       list_for_each_entry_safe(psb, psb_next, &aborts, list) {
+               psb->pCmd = NULL;
+               psb->status = IOSTAT_SUCCESS;
+       }
+       spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+       list_splice(&aborts, &phba->lpfc_scsi_buf_list);
+       spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       return 0;
+}
+
+/**
+ * lpfc_hba_down_post - Wrapper func for hba down post routine
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 routine for performing
+ * uninitialization after the HBA is reset when bringing down the SLI Layer.
+ *
+ * Return codes
+ *   0 - success.
+ *   Any other value - error.
+ **/
+int
+lpfc_hba_down_post(struct lpfc_hba *phba)
+{
+       return (*phba->lpfc_hba_down_post)(phba);
+}
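
lpfc_hba_down_post is the first of several one-line wrappers in this patch (lpfc_handle_eratt and lpfc_stop_port below follow the same shape): the SLI3-vs-SLI4 choice is made once, by storing the _s3 or _s4 routine in a function pointer on lpfc_hba, so callers stay rev-agnostic. A hedged sketch of how such a table could be wired at probe time; the setup function name is hypothetical, while the per-rev handlers and pointer fields are the ones used in this file:

/* example_setup_api_table() is a hypothetical name for illustration */
static void example_setup_api_table(struct lpfc_hba *phba, int sli_rev)
{
	if (sli_rev == LPFC_SLI_REV4) {
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
		phba->lpfc_handle_eratt  = lpfc_handle_eratt_s4;
		phba->lpfc_stop_port     = lpfc_stop_port_s4;
	} else {
		phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
		phba->lpfc_handle_eratt  = lpfc_handle_eratt_s3;
		phba->lpfc_stop_port     = lpfc_stop_port_s3;
	}
}
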
 
 /**
  * lpfc_hb_timeout - The HBA-timer timeout handler
@@ -809,7 +907,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
                                        "taking this port offline.\n");
 
                        spin_lock_irq(&phba->hbalock);
-                       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+                       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
                        spin_unlock_irq(&phba->hbalock);
 
                        lpfc_offline_prep(phba);
@@ -834,13 +932,15 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
        struct lpfc_sli   *psli = &phba->sli;
 
        spin_lock_irq(&phba->hbalock);
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
        lpfc_offline_prep(phba);
 
        lpfc_offline(phba);
        lpfc_reset_barrier(phba);
+       spin_lock_irq(&phba->hbalock);
        lpfc_sli_brdreset(phba);
+       spin_unlock_irq(&phba->hbalock);
        lpfc_hba_down_post(phba);
        lpfc_sli_brdready(phba, HS_MBRDY);
        lpfc_unblock_mgmt_io(phba);
@@ -848,6 +948,25 @@ lpfc_offline_eratt(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_sli4_offline_eratt - Bring lpfc offline on SLI4 hardware error attention
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to bring a SLI4 HBA offline when HBA hardware error
+ * other than Port Error 6 has been detected.
+ **/
+static void
+lpfc_sli4_offline_eratt(struct lpfc_hba *phba)
+{
+       lpfc_offline_prep(phba);
+       lpfc_offline(phba);
+       lpfc_sli4_brdreset(phba);
+       lpfc_hba_down_post(phba);
+       lpfc_sli4_post_status_check(phba);
+       lpfc_unblock_mgmt_io(phba);
+       phba->link_state = LPFC_HBA_ERROR;
+}
+
 /**
  * lpfc_handle_deferred_eratt - The HBA hardware deferred error handler
  * @phba: pointer to lpfc hba data structure.
@@ -864,6 +983,16 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
        struct lpfc_sli_ring  *pring;
        struct lpfc_sli *psli = &phba->sli;
 
+       /* If the pci channel is offline, ignore possible errors,
+        * since we cannot communicate with the pci card anyway.
+        */
+       if (pci_channel_offline(phba->pcidev)) {
+               spin_lock_irq(&phba->hbalock);
+               phba->hba_flag &= ~DEFER_ERATT;
+               spin_unlock_irq(&phba->hbalock);
+               return;
+       }
+
        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                "0479 Deferred Adapter Hardware Error "
                "Data: x%x x%x x%x\n",
@@ -871,7 +1000,7 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
                phba->work_status[0], phba->work_status[1]);
 
        spin_lock_irq(&phba->hbalock);
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
 
 
@@ -909,13 +1038,30 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
        if ((!phba->work_hs) && (!(phba->pport->load_flag & FC_UNLOADING)))
                phba->work_hs = old_host_status & ~HS_FFER1;
 
+       spin_lock_irq(&phba->hbalock);
        phba->hba_flag &= ~DEFER_ERATT;
+       spin_unlock_irq(&phba->hbalock);
        phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
        phba->work_status[1] = readl(phba->MBslimaddr + 0xac);
 }
 
+static void
+lpfc_board_errevt_to_mgmt(struct lpfc_hba *phba)
+{
+       struct lpfc_board_event_header board_event;
+       struct Scsi_Host *shost;
+
+       board_event.event_type = FC_REG_BOARD_EVENT;
+       board_event.subcategory = LPFC_EVENT_PORTINTERR;
+       shost = lpfc_shost_from_vport(phba->pport);
+       fc_host_post_vendor_event(shost, fc_get_event_number(),
+                                 sizeof(board_event),
+                                 (char *) &board_event,
+                                 LPFC_NL_VENDOR_ID);
+}
+
 /**
- * lpfc_handle_eratt - The HBA hardware error handler
+ * lpfc_handle_eratt_s3 - The SLI3 HBA hardware error handler
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to handle the following HBA hardware error
@@ -924,8 +1070,8 @@ lpfc_handle_deferred_eratt(struct lpfc_hba *phba)
  * 2 - DMA ring index out of range
  * 3 - Mailbox command came back as unknown
  **/
-void
-lpfc_handle_eratt(struct lpfc_hba *phba)
+static void
+lpfc_handle_eratt_s3(struct lpfc_hba *phba)
 {
        struct lpfc_vport *vport = phba->pport;
        struct lpfc_sli   *psli = &phba->sli;
@@ -934,24 +1080,23 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
        unsigned long temperature;
        struct temp_event temp_event_data;
        struct Scsi_Host  *shost;
-       struct lpfc_board_event_header board_event;
 
        /* If the pci channel is offline, ignore possible errors,
-        * since we cannot communicate with the pci card anyway. */
-       if (pci_channel_offline(phba->pcidev))
+        * since we cannot communicate with the pci card anyway.
+        */
+       if (pci_channel_offline(phba->pcidev)) {
+               spin_lock_irq(&phba->hbalock);
+               phba->hba_flag &= ~DEFER_ERATT;
+               spin_unlock_irq(&phba->hbalock);
                return;
+       }
+
        /* If resets are disabled then leave the HBA alone and return */
        if (!phba->cfg_enable_hba_reset)
                return;
 
        /* Send an internal error event to mgmt application */
-       board_event.event_type = FC_REG_BOARD_EVENT;
-       board_event.subcategory = LPFC_EVENT_PORTINTERR;
-       shost = lpfc_shost_from_vport(phba->pport);
-       fc_host_post_vendor_event(shost, fc_get_event_number(),
-                                 sizeof(board_event),
-                                 (char *) &board_event,
-                                 LPFC_NL_VENDOR_ID);
+       lpfc_board_errevt_to_mgmt(phba);
 
        if (phba->hba_flag & DEFER_ERATT)
                lpfc_handle_deferred_eratt(phba);
@@ -965,7 +1110,7 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
                                phba->work_status[0], phba->work_status[1]);
 
                spin_lock_irq(&phba->hbalock);
-               psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+               psli->sli_flag &= ~LPFC_SLI_ACTIVE;
                spin_unlock_irq(&phba->hbalock);
 
                /*
@@ -1036,6 +1181,65 @@ lpfc_handle_eratt(struct lpfc_hba *phba)
        return;
 }
 
+/**
+ * lpfc_handle_eratt_s4 - The SLI4 HBA hardware error handler
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to handle the SLI4 HBA hardware error attention
+ * conditions.
+ **/
+static void
+lpfc_handle_eratt_s4(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport = phba->pport;
+       uint32_t event_data;
+       struct Scsi_Host *shost;
+
+       /* If the pci channel is offline, ignore possible errors, since
+        * we cannot communicate with the pci card anyway.
+        */
+       if (pci_channel_offline(phba->pcidev))
+               return;
+       /* If resets are disabled then leave the HBA alone and return */
+       if (!phba->cfg_enable_hba_reset)
+               return;
+
+       /* Send an internal error event to mgmt application */
+       lpfc_board_errevt_to_mgmt(phba);
+
+       /* For now, the actual action for SLI4 device handling is not
+        * specified yet; just treat it as an adapter hardware failure.
+        */
+       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "0143 SLI4 Adapter Hardware Error Data: x%x x%x\n",
+                       phba->work_status[0], phba->work_status[1]);
+
+       event_data = FC_REG_DUMP_EVENT;
+       shost = lpfc_shost_from_vport(vport);
+       fc_host_post_vendor_event(shost, fc_get_event_number(),
+                                 sizeof(event_data), (char *) &event_data,
+                                 SCSI_NL_VID_TYPE_PCI | PCI_VENDOR_ID_EMULEX);
+
+       lpfc_sli4_offline_eratt(phba);
+}
+
+/**
+ * lpfc_handle_eratt - Wrapper func for handling hba error attention
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba error attention handling
+ * routine from the API jump table function pointer from the lpfc_hba struct.
+ *
+ * Return codes
+ *   0 - sucess.
+ *   Any other value - error.
+ **/
+void
+lpfc_handle_eratt(struct lpfc_hba *phba)
+{
+       (*phba->lpfc_handle_eratt)(phba);
+}
+
 /**
  * lpfc_handle_latt - The HBA link event handler
  * @phba: pointer to lpfc hba data structure.
@@ -1137,7 +1341,7 @@ lpfc_handle_latt_err_exit:
  *   0 - pointer to the VPD passed in is NULL
  *   1 - success
  **/
-static int
+int
 lpfc_parse_vpd(struct lpfc_hba *phba, uint8_t *vpd, int len)
 {
        uint8_t lenlo, lenhi;
@@ -1292,6 +1496,7 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
        uint16_t dev_id = phba->pcidev->device;
        int max_speed;
        int GE = 0;
+       int oneConnect = 0; /* default is not a oneConnect */
        struct {
                char * name;
                int    max_speed;
@@ -1437,6 +1642,14 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
        case PCI_DEVICE_ID_PROTEUS_S:
                m = (typeof(m)) {"LPemv12002-S", max_speed, "PCIe IOV"};
                break;
+       case PCI_DEVICE_ID_TIGERSHARK:
+               oneConnect = 1;
+               m = (typeof(m)) {"OCe10100-F", max_speed, "PCIe"};
+               break;
+       case PCI_DEVICE_ID_TIGERSHARK_S:
+               oneConnect = 1;
+               m = (typeof(m)) {"OCe10100-F-S", max_speed, "PCIe"};
+               break;
        default:
                m = (typeof(m)){ NULL };
                break;
@@ -1444,13 +1657,24 @@ lpfc_get_hba_model_desc(struct lpfc_hba *phba, uint8_t *mdp, uint8_t *descp)
 
        if (mdp && mdp[0] == '\0')
                snprintf(mdp, 79,"%s", m.name);
-       if (descp && descp[0] == '\0')
-               snprintf(descp, 255,
-                       "Emulex %s %d%s %s %s",
-                       m.name, m.max_speed,
-                       (GE) ? "GE" : "Gb",
-                       m.bus,
-                       (GE) ? "FCoE Adapter" : "Fibre Channel Adapter");
+       /* oneConnect hbas require special processing: they are all
+        * initiators, and we put the port number on the end.
+        */
+       if (descp && descp[0] == '\0') {
+               if (oneConnect)
+                       snprintf(descp, 255,
+                               "Emulex OneConnect %s, FCoE Initiator, Port %s",
+                               m.name,
+                               phba->Port);
+               else
+                       snprintf(descp, 255,
+                               "Emulex %s %d%s %s %s",
+                               m.name, m.max_speed,
+                               (GE) ? "GE" : "Gb",
+                               m.bus,
+                               (GE) ? "FCoE Adapter" :
+                                       "Fibre Channel Adapter");
+       }
 }
 
 /**
@@ -1533,7 +1757,8 @@ lpfc_post_buffer(struct lpfc_hba *phba, struct lpfc_sli_ring *pring, int cnt)
                icmd->ulpCommand = CMD_QUE_RING_BUF64_CN;
                icmd->ulpLe = 1;
 
-               if (lpfc_sli_issue_iocb(phba, pring, iocb, 0) == IOCB_ERROR) {
+               if (lpfc_sli_issue_iocb(phba, pring->ringno, iocb, 0) ==
+                   IOCB_ERROR) {
                        lpfc_mbuf_free(phba, mp1->virt, mp1->phys);
                        kfree(mp1);
                        cnt++;
@@ -1761,7 +1986,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
         * Lets wait for this to happen, if needed.
         */
        while (!list_empty(&vport->fc_nodes)) {
-
                if (i++ > 3000) {
                        lpfc_printf_vlog(vport, KERN_ERR, LOG_DISCOVERY,
                                "0233 Nodelist not empty\n");
@@ -1782,7 +2006,6 @@ lpfc_cleanup(struct lpfc_vport *vport)
                /* Wait for any activity on ndlps to settle */
                msleep(10);
        }
-       return;
 }
 
 /**
@@ -1803,22 +2026,36 @@ lpfc_stop_vport_timers(struct lpfc_vport *vport)
 }
 
 /**
- * lpfc_stop_phba_timers - Stop all the timers associated with an HBA
+ * lpfc_stop_hba_timers - Stop all the timers associated with an HBA
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine stops all the timers associated with a HBA. This function is
  * invoked before either putting a HBA offline or unloading the driver.
  **/
-static void
-lpfc_stop_phba_timers(struct lpfc_hba *phba)
+void
+lpfc_stop_hba_timers(struct lpfc_hba *phba)
 {
-       del_timer_sync(&phba->fcp_poll_timer);
        lpfc_stop_vport_timers(phba->pport);
        del_timer_sync(&phba->sli.mbox_tmo);
        del_timer_sync(&phba->fabric_block_timer);
-       phba->hb_outstanding = 0;
-       del_timer_sync(&phba->hb_tmofunc);
        del_timer_sync(&phba->eratt_poll);
+       del_timer_sync(&phba->hb_tmofunc);
+       phba->hb_outstanding = 0;
+
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               /* Stop any LightPulse device specific driver timers */
+               del_timer_sync(&phba->fcp_poll_timer);
+               break;
+       case LPFC_PCI_DEV_OC:
+               /* Stop any OneConnect device specific driver timers */
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0297 Invalid device group (x%x)\n",
+                               phba->pci_dev_grp);
+               break;
+       }
        return;
 }
 
@@ -1878,14 +2115,21 @@ lpfc_online(struct lpfc_hba *phba)
                return 1;
        }
 
-       if (lpfc_sli_hba_setup(phba)) { /* Initialize the HBA */
-               lpfc_unblock_mgmt_io(phba);
-               return 1;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               if (lpfc_sli4_hba_setup(phba)) { /* Initialize SLI4 HBA */
+                       lpfc_unblock_mgmt_io(phba);
+                       return 1;
+               }
+       } else {
+               if (lpfc_sli_hba_setup(phba)) { /* Initialize SLI2/SLI3 HBA */
+                       lpfc_unblock_mgmt_io(phba);
+                       return 1;
+               }
        }
 
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        struct Scsi_Host *shost;
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
@@ -1947,11 +2191,12 @@ lpfc_offline_prep(struct lpfc_hba * phba)
        /* Issue an unreg_login to all nodes on all vports */
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL) {
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        struct Scsi_Host *shost;
 
                        if (vports[i]->load_flag & FC_UNLOADING)
                                continue;
+                       vports[i]->vfi_state &= ~LPFC_VFI_REGISTERED;
                        shost = lpfc_shost_from_vport(vports[i]);
                        list_for_each_entry_safe(ndlp, next_ndlp,
                                                 &vports[i]->fc_nodes,
@@ -1975,7 +2220,7 @@ lpfc_offline_prep(struct lpfc_hba * phba)
        }
        lpfc_destroy_vport_work_array(phba, vports);
 
-       lpfc_sli_flush_mbox_queue(phba);
+       lpfc_sli_mbox_sys_shutdown(phba);
 }
 
 /**
@@ -1996,11 +2241,11 @@ lpfc_offline(struct lpfc_hba *phba)
        if (phba->pport->fc_flag & FC_OFFLINE_MODE)
                return;
 
-       /* stop all timers associated with this hba */
-       lpfc_stop_phba_timers(phba);
+       /* stop port and all timers associated with this hba */
+       lpfc_stop_port(phba);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++)
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
                        lpfc_stop_vport_timers(vports[i]);
        lpfc_destroy_vport_work_array(phba, vports);
        lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
@@ -2013,7 +2258,7 @@ lpfc_offline(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->hbalock);
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        spin_lock_irq(shost->host_lock);
                        vports[i]->work_port_events = 0;
@@ -2106,6 +2351,10 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
        shost->max_lun = vport->cfg_max_luns;
        shost->this_id = -1;
        shost->max_cmd_len = 16;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               shost->dma_boundary = LPFC_SLI4_MAX_SEGMENT_SIZE;
+               shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+       }
 
        /*
         * Set initial can_queue value since 0 is no longer supported and
@@ -2123,6 +2372,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
 
        /* Initialize all internally managed lists. */
        INIT_LIST_HEAD(&vport->fc_nodes);
+       INIT_LIST_HEAD(&vport->rcv_buffer_list);
        spin_lock_init(&vport->work_port_lock);
 
        init_timer(&vport->fc_disctmo);
@@ -2314,192 +2564,501 @@ void lpfc_host_attrib_init(struct Scsi_Host *shost)
 }
 
 /**
- * lpfc_enable_msix - Enable MSI-X interrupt mode
+ * lpfc_stop_port_s3 - Stop SLI3 device port
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable the MSI-X interrupt vectors. The kernel
- * function pci_enable_msix() is called to enable the MSI-X vectors. Note that
- * pci_enable_msix(), once invoked, enables either all or nothing, depending
- * on the current availability of PCI vector resources. The device driver is
- * responsible for calling the individual request_irq() to register each MSI-X
- * vector with a interrupt handler, which is done in this function. Note that
- * later when device is unloading, the driver should always call free_irq()
- * on all MSI-X vectors it has done request_irq() on before calling
- * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
- * will be left with MSI-X enabled and leaks its vectors.
- *
- * Return codes
- *   0 - sucessful
- *   other values - error
+ * This routine is invoked to stop an SLI3 device port; it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
  **/
-static int
-lpfc_enable_msix(struct lpfc_hba *phba)
+static void
+lpfc_stop_port_s3(struct lpfc_hba *phba)
 {
-       int rc, i;
-       LPFC_MBOXQ_t *pmb;
+       /* Clear all interrupt enable conditions */
+       writel(0, phba->HCregaddr);
+       readl(phba->HCregaddr); /* flush */
+       /* Clear all pending interrupts */
+       writel(0xffffffff, phba->HAregaddr);
+       readl(phba->HAregaddr); /* flush */
 
-       /* Set up MSI-X multi-message vectors */
-       for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-               phba->msix_entries[i].entry = i;
+       /* Reset some HBA SLI setup states */
+       lpfc_stop_hba_timers(phba);
+       phba->pport->work_port_events = 0;
+}
 
-       /* Configure MSI-X capability structure */
-       rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
-                               ARRAY_SIZE(phba->msix_entries));
-       if (rc) {
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "0420 PCI enable MSI-X failed (%d)\n", rc);
-               goto msi_fail_out;
-       } else
-               for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "0477 MSI-X entry[%d]: vector=x%x "
-                                       "message=%d\n", i,
-                                       phba->msix_entries[i].vector,
-                                       phba->msix_entries[i].entry);
-       /*
-        * Assign MSI-X vectors to interrupt handlers
-        */
+/**
+ * lpfc_stop_port_s4 - Stop SLI4 device port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to stop an SLI4 device port; it stops the device
+ * from generating interrupts and stops the device driver's timers for the
+ * device.
+ **/
+static void
+lpfc_stop_port_s4(struct lpfc_hba *phba)
+{
+       /* Reset some HBA SLI4 setup states */
+       lpfc_stop_hba_timers(phba);
+       phba->pport->work_port_events = 0;
+       phba->sli4_hba.intr_enable = 0;
+       /* Hard-clear it for now; a more graceful way to wait can come later */
+       phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+}
 
-       /* vector-0 is associated to slow-path handler */
-       rc = request_irq(phba->msix_entries[0].vector, &lpfc_sp_intr_handler,
-                        IRQF_SHARED, LPFC_SP_DRIVER_HANDLER_NAME, phba);
-       if (rc) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "0421 MSI-X slow-path request_irq failed "
-                               "(%d)\n", rc);
-               goto msi_fail_out;
-       }
+/**
+ * lpfc_stop_port - Wrapper function for stopping hba port
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 hba stop port routine via
+ * the API jump table function pointer in the lpfc_hba struct.
+ **/
+void
+lpfc_stop_port(struct lpfc_hba *phba)
+{
+       phba->lpfc_stop_port(phba);
+}
 
-       /* vector-1 is associated to fast-path handler */
-       rc = request_irq(phba->msix_entries[1].vector, &lpfc_fp_intr_handler,
-                        IRQF_SHARED, LPFC_FP_DRIVER_HANDLER_NAME, phba);
+/**
+ * lpfc_sli_remove_dflt_fcf - Remove the driver default fcf record from the port.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove the driver default fcf record from
+ * the port.  This routine currently acts on FCF Index 0.
+ *
+ **/
+void
+lpfc_sli_remove_dflt_fcf(struct lpfc_hba *phba)
+{
+       int rc = 0;
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mbx_del_fcf_tbl_entry *del_fcf_record;
+       uint32_t mbox_tmo, req_len;
+       uint32_t shdr_status, shdr_add_status;
 
-       if (rc) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "0429 MSI-X fast-path request_irq failed "
-                               "(%d)\n", rc);
-               goto irq_fail_out;
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2020 Failed to allocate mbox for DEL_FCF cmd\n");
+               return;
        }
 
+       req_len = sizeof(struct lpfc_mbx_del_fcf_tbl_entry) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr);
+       rc = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                             LPFC_MBOX_OPCODE_FCOE_DELETE_FCF,
+                             req_len, LPFC_SLI4_MBX_EMBED);
        /*
-        * Configure HBA MSI-X attention conditions to messages
+        * In phase 1, there is a single FCF index, 0.  In phase 2, the driver
+        * supports multiple FCF indices.
         */
-       pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       del_fcf_record = &mboxq->u.mqe.un.del_fcf_entry;
+       bf_set(lpfc_mbx_del_fcf_tbl_count, del_fcf_record, 1);
+       bf_set(lpfc_mbx_del_fcf_tbl_index, del_fcf_record,
+              phba->fcf.fcf_indx);
 
-       if (!pmb) {
-               rc = -ENOMEM;
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0474 Unable to allocate memory for issuing "
-                               "MBOX_CONFIG_MSI command\n");
-               goto mem_fail_out;
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
        }
-       rc = lpfc_config_msi(phba, pmb);
-       if (rc)
-               goto mbx_fail_out;
-       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
-       if (rc != MBX_SUCCESS) {
-               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
-                               "0351 Config MSI mailbox command failed, "
-                               "mbxCmd x%x, mbxStatus x%x\n",
-                               pmb->mb.mbxCommand, pmb->mb.mbxStatus);
-               goto mbx_fail_out;
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr_status = bf_get(lpfc_mbox_hdr_status,
+                            &del_fcf_record->header.cfg_shdr.response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                &del_fcf_record->header.cfg_shdr.response);
+       if (shdr_status || shdr_add_status || rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2516 DEL FCF of default FCF Index failed "
+                               "mbx status x%x, status x%x add_status x%x\n",
+                               rc, shdr_status, shdr_add_status);
        }
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+}
 
-       /* Free memory allocated for mailbox command */
-       mempool_free(pmb, phba->mbox_mem_pool);
-       return rc;
+/**
+ * lpfc_sli4_parse_latt_fault - Parse sli4 link-attention link fault code
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link fault code and
+ * translate it into the base driver's read link attention mailbox command
+ * status.
+ *
+ * Return: Link-attention status in terms of base driver's coding.
+ **/
+static uint16_t
+lpfc_sli4_parse_latt_fault(struct lpfc_hba *phba,
+                          struct lpfc_acqe_link *acqe_link)
+{
+       uint16_t latt_fault;
 
-mbx_fail_out:
-       /* Free memory allocated for mailbox command */
-       mempool_free(pmb, phba->mbox_mem_pool);
+       switch (bf_get(lpfc_acqe_link_fault, acqe_link)) {
+       case LPFC_ASYNC_LINK_FAULT_NONE:
+       case LPFC_ASYNC_LINK_FAULT_LOCAL:
+       case LPFC_ASYNC_LINK_FAULT_REMOTE:
+               latt_fault = 0;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0398 Invalid link fault code: x%x\n",
+                               bf_get(lpfc_acqe_link_fault, acqe_link));
+               latt_fault = MBXERR_ERROR;
+               break;
+       }
+       return latt_fault;
+}
 
-mem_fail_out:
-       /* free the irq already requested */
-       free_irq(phba->msix_entries[1].vector, phba);
+/**
+ * lpfc_sli4_parse_latt_type - Parse sli4 link attention type
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link attention type and translate it
+ * into the base driver's link attention type coding.
+ *
+ * Return: Link attention type in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_type(struct lpfc_hba *phba,
+                         struct lpfc_acqe_link *acqe_link)
+{
+       uint8_t att_type;
 
-irq_fail_out:
-       /* free the irq already requested */
-       free_irq(phba->msix_entries[0].vector, phba);
+       switch (bf_get(lpfc_acqe_link_status, acqe_link)) {
+       case LPFC_ASYNC_LINK_STATUS_DOWN:
+       case LPFC_ASYNC_LINK_STATUS_LOGICAL_DOWN:
+               att_type = AT_LINK_DOWN;
+               break;
+       case LPFC_ASYNC_LINK_STATUS_UP:
+               /* Ignore physical link up events - wait for logical link up */
+               att_type = AT_RESERVED;
+               break;
+       case LPFC_ASYNC_LINK_STATUS_LOGICAL_UP:
+               att_type = AT_LINK_UP;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0399 Invalid link attention type: x%x\n",
+                               bf_get(lpfc_acqe_link_status, acqe_link));
+               att_type = AT_RESERVED;
+               break;
+       }
+       return att_type;
+}
 
-msi_fail_out:
-       /* Unconfigure MSI-X capability structure */
-       pci_disable_msix(phba->pcidev);
-       return rc;
+/**
+ * lpfc_sli4_parse_latt_link_speed - Parse sli4 link-attention link speed
+ * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
+ *
+ * This routine is to parse the SLI4 link-attention link speed and translate
+ * it into the base driver's link-attention link speed coding.
+ *
+ * Return: Link-attention link speed in terms of base driver's coding.
+ **/
+static uint8_t
+lpfc_sli4_parse_latt_link_speed(struct lpfc_hba *phba,
+                               struct lpfc_acqe_link *acqe_link)
+{
+       uint8_t link_speed;
+
+       switch (bf_get(lpfc_acqe_link_speed, acqe_link)) {
+       case LPFC_ASYNC_LINK_SPEED_ZERO:
+               link_speed = LA_UNKNW_LINK;
+               break;
+       case LPFC_ASYNC_LINK_SPEED_10MBPS:
+               link_speed = LA_UNKNW_LINK;
+               break;
+       case LPFC_ASYNC_LINK_SPEED_100MBPS:
+               link_speed = LA_UNKNW_LINK;
+               break;
+       case LPFC_ASYNC_LINK_SPEED_1GBPS:
+               link_speed = LA_1GHZ_LINK;
+               break;
+       case LPFC_ASYNC_LINK_SPEED_10GBPS:
+               link_speed = LA_10GHZ_LINK;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0483 Invalid link-attention link speed: x%x\n",
+                               bf_get(lpfc_acqe_link_speed, acqe_link));
+               link_speed = LA_UNKNW_LINK;
+               break;
+       }
+       return link_speed;
 }
 
 /**
- * lpfc_disable_msix - Disable MSI-X interrupt mode
+ * lpfc_sli4_async_link_evt - Process the asynchronous link event
  * @phba: pointer to lpfc hba data structure.
+ * @acqe_link: pointer to the async link completion queue entry.
  *
- * This routine is invoked to release the MSI-X vectors and then disable the
- * MSI-X interrupt mode.
+ * This routine is to handle the SLI4 asynchronous link event.
  **/
 static void
-lpfc_disable_msix(struct lpfc_hba *phba)
+lpfc_sli4_async_link_evt(struct lpfc_hba *phba,
+                        struct lpfc_acqe_link *acqe_link)
 {
-       int i;
+       struct lpfc_dmabuf *mp;
+       LPFC_MBOXQ_t *pmb;
+       MAILBOX_t *mb;
+       READ_LA_VAR *la;
+       uint8_t att_type;
 
-       /* Free up MSI-X multi-message vectors */
-       for (i = 0; i < LPFC_MSIX_VECTORS; i++)
-               free_irq(phba->msix_entries[i].vector, phba);
-       /* Disable MSI-X */
-       pci_disable_msix(phba->pcidev);
+       att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
+       if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
+               return;
+       pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0395 The mboxq allocation failed\n");
+               return;
+       }
+       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!mp) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0396 The lpfc_dmabuf allocation failed\n");
+               goto out_free_pmb;
+       }
+       mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+       if (!mp->virt) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0397 The mbuf allocation failed\n");
+               goto out_free_dmabuf;
+       }
+
+       /* Cleanup any outstanding ELS commands */
+       lpfc_els_flush_all_cmd(phba);
+
+       /* Block ELS IOCBs until we are done processing the link event */
+       phba->sli.ring[LPFC_ELS_RING].flag |= LPFC_STOP_IOCB_EVENT;
+
+       /* Update link event statistics */
+       phba->sli.slistat.link_event++;
+
+       /* Create pseudo lpfc_handle_latt mailbox command from link ACQE */
+       lpfc_read_la(phba, pmb, mp);
+       pmb->vport = phba->pport;
+
+       /* Parse and translate status field */
+       mb = &pmb->u.mb;
+       mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
+
+       /* Parse and translate link attention fields */
+       la = (READ_LA_VAR *) &pmb->u.mb.un.varReadLA;
+       la->eventTag = acqe_link->event_tag;
+       la->attType = att_type;
+       la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
+
+       /* Fake the following irrelevant fields */
+       la->topology = TOPOLOGY_PT_PT;
+       la->granted_AL_PA = 0;
+       la->il = 0;
+       la->pb = 0;
+       la->fa = 0;
+       la->mm = 0;
+
+       /* Keep the link status for extra SLI4 state machine reference */
+       phba->sli4_hba.link_state.speed =
+                               bf_get(lpfc_acqe_link_speed, acqe_link);
+       phba->sli4_hba.link_state.duplex =
+                               bf_get(lpfc_acqe_link_duplex, acqe_link);
+       phba->sli4_hba.link_state.status =
+                               bf_get(lpfc_acqe_link_status, acqe_link);
+       phba->sli4_hba.link_state.physical =
+                               bf_get(lpfc_acqe_link_physical, acqe_link);
+       phba->sli4_hba.link_state.fault =
+                               bf_get(lpfc_acqe_link_fault, acqe_link);
+
+       /* Invoke the lpfc_handle_latt mailbox command callback function */
+       lpfc_mbx_cmpl_read_la(phba, pmb);
+
+       return;
+
+out_free_dmabuf:
+       kfree(mp);
+out_free_pmb:
+       mempool_free(pmb, phba->mbox_mem_pool);
 }
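
Rather than teach the discovery code about ACQEs, the handler above
fabricates a READ_LA mailbox completion and hands it to the existing
completion handler, so the SLI3 link-attention plumbing is reused as-is.
Condensed outline (calls taken from the function above; error paths and
the faked fields elided):

	lpfc_read_la(phba, pmb, mp);	/* build the pseudo mailbox */
	mb->mbxStatus = lpfc_sli4_parse_latt_fault(phba, acqe_link);
	la->attType   = lpfc_sli4_parse_latt_type(phba, acqe_link);
	la->UlnkSpeed = lpfc_sli4_parse_latt_link_speed(phba, acqe_link);
	lpfc_mbx_cmpl_read_la(phba, pmb);	/* drive the shared handler */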
 
 /**
- * lpfc_enable_msi - Enable MSI interrupt mode
+ * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
  * @phba: pointer to lpfc hba data structure.
+ * @acqe_fcoe: pointer to the async fcoe completion queue entry.
  *
- * This routine is invoked to enable the MSI interrupt mode. The kernel
- * function pci_enable_msi() is called to enable the MSI vector. The
- * device driver is responsible for calling the request_irq() to register
- * MSI vector with a interrupt the handler, which is done in this function.
- *
- * Return codes
- *     0 - sucessful
- *     other values - error
- */
-static int
-lpfc_enable_msi(struct lpfc_hba *phba)
+ * This routine is to handle the SLI4 asynchronous fcoe event.
+ **/
+static void
+lpfc_sli4_async_fcoe_evt(struct lpfc_hba *phba,
+                        struct lpfc_acqe_fcoe *acqe_fcoe)
 {
+       uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
        int rc;
 
-       rc = pci_enable_msi(phba->pcidev);
-       if (!rc)
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "0462 PCI enable MSI mode success.\n");
-       else {
-               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                               "0471 PCI enable MSI mode failed (%d)\n", rc);
-               return rc;
-       }
+       switch (event_type) {
+       case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                       "2546 New FCF found index 0x%x tag 0x%x\n",
+                       acqe_fcoe->fcf_index,
+                       acqe_fcoe->event_tag);
+               /*
+                * If the current FCF is in discovered state,
+                * do nothing.
+                */
+               spin_lock_irq(&phba->hbalock);
+               if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+                       spin_unlock_irq(&phba->hbalock);
+                       break;
+               }
+               spin_unlock_irq(&phba->hbalock);
 
-       rc = request_irq(phba->pcidev->irq, lpfc_intr_handler,
-                        IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-       if (rc) {
-               pci_disable_msi(phba->pcidev);
-               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
-                               "0478 MSI request_irq failed (%d)\n", rc);
+               /* Read the FCF table and re-discover SAN. */
+               rc = lpfc_sli4_read_fcf_record(phba,
+                       LPFC_FCOE_FCF_GET_FIRST);
+               if (rc)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                               "2547 Read FCF record failed 0x%x\n",
+                               rc);
+               break;
+
+       case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "2548 FCF Table full count 0x%x tag 0x%x\n",
+                       bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
+                       acqe_fcoe->event_tag);
+               break;
+
+       case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
+               lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+                       "2549 FCF disconnected from network index 0x%x"
+                       " tag 0x%x\n", acqe_fcoe->fcf_index,
+                       acqe_fcoe->event_tag);
+               /* If the event is not for the currently used FCF, do nothing */
+               if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
+                       break;
+               /*
+                * Currently, the driver supports only one FCF, so treat this
+                * as a link down.
+                */
+               lpfc_linkdown(phba);
+               /* Unregister FCF if no devices connected to it */
+               lpfc_unregister_unused_fcf(phba);
+               break;
+
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "0288 Unknown FCoE event type 0x%x event tag "
+                       "0x%x\n", event_type, acqe_fcoe->event_tag);
+               break;
        }
-       return rc;
 }
 
 /**
- * lpfc_disable_msi - Disable MSI interrupt mode
+ * lpfc_sli4_async_dcbx_evt - Process the asynchronous dcbx event
  * @phba: pointer to lpfc hba data structure.
+ * @acqe_dcbx: pointer to the async dcbx completion queue entry.
  *
- * This routine is invoked to disable the MSI interrupt mode. The driver
- * calls free_irq() on MSI vector it has done request_irq() on before
- * calling pci_disable_msi(). Failure to do so results in a BUG_ON() and
- * a device will be left with MSI enabled and leaks its vector.
- */
-
+ * This routine is to handle the SLI4 asynchronous dcbx event.
+ **/
 static void
-lpfc_disable_msi(struct lpfc_hba *phba)
+lpfc_sli4_async_dcbx_evt(struct lpfc_hba *phba,
+                        struct lpfc_acqe_dcbx *acqe_dcbx)
 {
-       free_irq(phba->pcidev->irq, phba);
-       pci_disable_msi(phba->pcidev);
-       return;
+       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "0290 The SLI4 DCBX asynchronous event is not "
+                       "handled yet\n");
+}
+
+/**
+ * lpfc_sli4_async_event_proc - Process all the pending asynchronous events
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 asynchronous events.
+ **/
+void lpfc_sli4_async_event_proc(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event;
+
+       /* First, declare the async event has been handled */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~ASYNC_EVENT;
+       spin_unlock_irq(&phba->hbalock);
+       /* Now, handle all the async events */
+       while (!list_empty(&phba->sli4_hba.sp_asynce_work_queue)) {
+               /* Get the first event from the head of the event queue */
+               spin_lock_irq(&phba->hbalock);
+               list_remove_head(&phba->sli4_hba.sp_asynce_work_queue,
+                                cq_event, struct lpfc_cq_event, list);
+               spin_unlock_irq(&phba->hbalock);
+               /* Process the asynchronous event */
+               switch (bf_get(lpfc_trailer_code, &cq_event->cqe.mcqe_cmpl)) {
+               case LPFC_TRAILER_CODE_LINK:
+                       lpfc_sli4_async_link_evt(phba,
+                                                &cq_event->cqe.acqe_link);
+                       break;
+               case LPFC_TRAILER_CODE_FCOE:
+                       lpfc_sli4_async_fcoe_evt(phba,
+                                                &cq_event->cqe.acqe_fcoe);
+                       break;
+               case LPFC_TRAILER_CODE_DCBX:
+                       lpfc_sli4_async_dcbx_evt(phba,
+                                                &cq_event->cqe.acqe_dcbx);
+                       break;
+               default:
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "1804 Invalid asynchronous event code: "
+                                       "x%x\n", bf_get(lpfc_trailer_code,
+                                       &cq_event->cqe.mcqe_cmpl));
+                       break;
+               }
+               /* Return the processed completion event to the free pool */
+               lpfc_sli4_cq_event_release(phba, cq_event);
+       }
+}
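
The drain loop above follows a common worker-thread pattern: clear the
summary flag under the lock, then repeatedly detach one entry under the
lock and process it unlocked, so events queued concurrently are still
picked up. A self-contained userspace analogue, with a pthread mutex
standing in for hbalock (illustrative only, not driver code):

	#include <pthread.h>
	#include <stdio.h>
	#include <stdlib.h>

	struct evt {
		struct evt *next;
		int code;
	};

	static pthread_mutex_t qlock = PTHREAD_MUTEX_INITIALIZER;
	static struct evt *queue;	/* pending events */
	static int async_pending;	/* analogue of ASYNC_EVENT */

	static void process(struct evt *e)
	{
		printf("handling event code %d\n", e->code);
	}

	static void drain_events(void)
	{
		struct evt *e;

		/* first declare the summary flag handled, under the lock */
		pthread_mutex_lock(&qlock);
		async_pending = 0;
		pthread_mutex_unlock(&qlock);

		/* then detach one entry at a time and process it unlocked */
		for (;;) {
			pthread_mutex_lock(&qlock);
			e = queue;
			if (e)
				queue = e->next;
			pthread_mutex_unlock(&qlock);
			if (!e)
				break;
			process(e);
			free(e);
		}
	}

	int main(void)
	{
		for (int i = 0; i < 3; i++) {
			struct evt *e = malloc(sizeof(*e));
			e->code = i;
			e->next = queue;
			queue = e;
		}
		async_pending = 1;
		drain_events();
		return 0;
	}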
+
+/**
+ * lpfc_api_table_setup - Set up per hba pci-device group func api jump table
+ * @phba: pointer to lpfc hba data structure.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine is invoked to set up the per HBA PCI-Device group function
+ * API jump table entries.
+ *
+ * Return: 0 on success, otherwise -ENODEV
+ **/
+int
+lpfc_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+       int rc;
+
+       /* Set up lpfc PCI-device group */
+       phba->pci_dev_grp = dev_grp;
+
+       /* The LPFC_PCI_DEV_OC uses SLI4 */
+       if (dev_grp == LPFC_PCI_DEV_OC)
+               phba->sli_rev = LPFC_SLI_REV4;
+
+       /* Set up device INIT API function jump table */
+       rc = lpfc_init_api_table_setup(phba, dev_grp);
+       if (rc)
+               return -ENODEV;
+       /* Set up SCSI API function jump table */
+       rc = lpfc_scsi_api_table_setup(phba, dev_grp);
+       if (rc)
+               return -ENODEV;
+       /* Set up SLI API function jump table */
+       rc = lpfc_sli_api_table_setup(phba, dev_grp);
+       if (rc)
+               return -ENODEV;
+       /* Set up MBOX API function jump table */
+       rc = lpfc_mbox_api_table_setup(phba, dev_grp);
+       if (rc)
+               return -ENODEV;
+
+       return 0;
 }
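
A probe-path caller is expected to select the device group for the PCI id
being claimed and abandon initialization if the jump tables cannot be
populated. A hedged sketch of the call site (the surrounding probe
function and its unwind label are assumptions, not shown in this hunk):

	/* wire up all jump tables before any phba->lpfc_* pointer is used */
	rc = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
	if (rc)
		goto out_free_phba;	/* hypothetical unwind label */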
 
 /**
@@ -2509,9 +3068,8 @@ lpfc_disable_msi(struct lpfc_hba *phba)
  *
  * This routine is invoked to log the interrupt mode currently in use by
  * the device.
- */
-static void
-lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
+ **/
+static void lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
 {
        switch (intr_mode) {
        case 0:
@@ -2534,659 +3092,4383 @@ lpfc_log_intr_mode(struct lpfc_hba *phba, uint32_t intr_mode)
        return;
 }
 
-static void
-lpfc_stop_port(struct lpfc_hba *phba)
-{
-       /* Clear all interrupt enable conditions */
-       writel(0, phba->HCregaddr);
-       readl(phba->HCregaddr); /* flush */
-       /* Clear all pending interrupts */
-       writel(0xffffffff, phba->HAregaddr);
-       readl(phba->HAregaddr); /* flush */
-
-       /* Reset some HBA SLI setup states */
-       lpfc_stop_phba_timers(phba);
-       phba->pport->work_port_events = 0;
-
-       return;
-}
-
 /**
- * lpfc_enable_intr - Enable device interrupt
+ * lpfc_enable_pci_dev - Enable a generic PCI device.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to enable device interrupt and associate driver's
- * interrupt handler(s) to interrupt vector(s). Depends on the interrupt
- * mode configured to the driver, the driver will try to fallback from the
- * configured interrupt mode to an interrupt mode which is supported by the
- * platform, kernel, and device in the order of: MSI-X -> MSI -> IRQ.
+ * This routine is invoked to enable the PCI device, performing the setup
+ * steps that are common to all lpfc PCI devices.
  *
  * Return codes
- *   0 - sucessful
- *   other values - error
+ *     0 - successful
+ *     other values - error
  **/
-static uint32_t
-lpfc_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+static int
+lpfc_enable_pci_dev(struct lpfc_hba *phba)
 {
-       uint32_t intr_mode = LPFC_INTR_ERROR;
-       int retval;
+       struct pci_dev *pdev;
+       int bars;
 
-       if (cfg_mode == 2) {
-               /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
-               retval = lpfc_sli_config_port(phba, 3);
-               if (!retval) {
-                       /* Now, try to enable MSI-X interrupt mode */
-                       retval = lpfc_enable_msix(phba);
-                       if (!retval) {
-                               /* Indicate initialization to MSI-X mode */
-                               phba->intr_type = MSIX;
-                               intr_mode = 2;
-                       }
-               }
-       }
+       /* Obtain PCI device reference */
+       if (!phba->pcidev)
+               goto out_error;
+       else
+               pdev = phba->pcidev;
+       /* Select PCI BARs */
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       /* Enable PCI device */
+       if (pci_enable_device_mem(pdev))
+               goto out_error;
+       /* Request PCI resource for the device */
+       if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
+               goto out_disable_device;
+       /* Set up device as PCI master and save state for EEH */
+       pci_set_master(pdev);
+       pci_try_set_mwi(pdev);
+       pci_save_state(pdev);
 
-       /* Fallback to MSI if MSI-X initialization failed */
-       if (cfg_mode >= 1 && phba->intr_type == NONE) {
-               retval = lpfc_enable_msi(phba);
-               if (!retval) {
-                       /* Indicate initialization to MSI mode */
-                       phba->intr_type = MSI;
-                       intr_mode = 1;
-               }
-       }
+       return 0;
 
-       /* Fallback to INTx if both MSI-X/MSI initalization failed */
-       if (phba->intr_type == NONE) {
-               retval = request_irq(phba->pcidev->irq, lpfc_intr_handler,
-                                    IRQF_SHARED, LPFC_DRIVER_NAME, phba);
-               if (!retval) {
-                       /* Indicate initialization to INTx mode */
-                       phba->intr_type = INTx;
-                       intr_mode = 0;
-               }
-       }
-       return intr_mode;
+out_disable_device:
+       pci_disable_device(pdev);
+out_error:
+       return -ENODEV;
 }
 
 /**
- * lpfc_disable_intr - Disable device interrupt
+ * lpfc_disable_pci_dev - Disable a generic PCI device.
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine is invoked to disable device interrupt and disassociate the
- * driver's interrupt handler(s) from interrupt vector(s). Depending on the
- * interrupt mode, the driver will release the interrupt vector(s) for the
- * message signaled interrupt.
+ * This routine is invoked to disable the PCI device, reversing the setup
+ * steps that are common to all lpfc PCI devices.
  **/
 static void
-lpfc_disable_intr(struct lpfc_hba *phba)
+lpfc_disable_pci_dev(struct lpfc_hba *phba)
 {
-       /* Disable the currently initialized interrupt mode */
-       if (phba->intr_type == MSIX)
-               lpfc_disable_msix(phba);
-       else if (phba->intr_type == MSI)
-               lpfc_disable_msi(phba);
-       else if (phba->intr_type == INTx)
-               free_irq(phba->pcidev->irq, phba);
+       struct pci_dev *pdev;
+       int bars;
 
-       /* Reset interrupt management states */
-       phba->intr_type = NONE;
-       phba->sli.slistat.sli_intr = 0;
+       /* Obtain PCI device reference */
+       if (!phba->pcidev)
+               return;
+       else
+               pdev = phba->pcidev;
+       /* Select PCI BARs */
+       bars = pci_select_bars(pdev, IORESOURCE_MEM);
+       /* Release PCI resource and disable PCI device */
+       pci_release_selected_regions(pdev, bars);
+       pci_disable_device(pdev);
+       /* Null out PCI private reference to driver */
+       pci_set_drvdata(pdev, NULL);
 
        return;
 }
 
 /**
- * lpfc_pci_probe_one - lpfc PCI probe func to register device to PCI subsystem
- * @pdev: pointer to PCI device
- * @pid: pointer to PCI device identifier
- *
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is presented in PCI bus, the kernel PCI subsystem looks at
- * PCI device-specific information of the device and driver to see if the
- * driver state that it can support this kind of device. If the match is
- * successful, the driver core invokes this routine. If this routine
- * determines it can claim the HBA, it does all the initialization that it
- * needs to do to handle the HBA properly.
+ * lpfc_reset_hba - Reset a hba
+ * @phba: pointer to lpfc hba data structure.
  *
- * Return code
- *   0 - driver can claim the device
- *   negative value - driver can not claim the device
+ * This routine is invoked to reset a hba device. It brings the HBA
+ * offline, performs a board restart, and then brings the board back
+ * online. The lpfc_offline calls lpfc_sli_hba_down which will clean up
+ * any outstanding mailbox commands.
  **/
-static int __devinit
-lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+void
+lpfc_reset_hba(struct lpfc_hba *phba)
 {
-       struct lpfc_vport *vport = NULL;
-       struct lpfc_hba   *phba;
-       struct lpfc_sli   *psli;
-       struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
-       struct Scsi_Host  *shost = NULL;
-       void *ptr;
-       unsigned long bar0map_len, bar2map_len;
-       int error = -ENODEV, retval;
-       int  i, hbq_count;
-       uint16_t iotag;
-       uint32_t cfg_mode, intr_mode;
-       int bars = pci_select_bars(pdev, IORESOURCE_MEM);
-       struct lpfc_adapter_event_header adapter_event;
-
-       if (pci_enable_device_mem(pdev))
-               goto out;
-       if (pci_request_selected_regions(pdev, bars, LPFC_DRIVER_NAME))
-               goto out_disable_device;
-
-       phba = kzalloc(sizeof (struct lpfc_hba), GFP_KERNEL);
-       if (!phba)
-               goto out_release_regions;
-
-       atomic_set(&phba->fast_event_count, 0);
-       spin_lock_init(&phba->hbalock);
-
-       /* Initialize ndlp management spinlock */
-       spin_lock_init(&phba->ndlp_lock);
-
-       phba->pcidev = pdev;
+       /* If resets are disabled then set error state and return. */
+       if (!phba->cfg_enable_hba_reset) {
+               phba->link_state = LPFC_HBA_ERROR;
+               return;
+       }
+       lpfc_offline_prep(phba);
+       lpfc_offline(phba);
+       lpfc_sli_brdrestart(phba);
+       lpfc_online(phba);
+       lpfc_unblock_mgmt_io(phba);
+}
 
-       /* Assign an unused board number */
-       if ((phba->brd_no = lpfc_get_instance()) < 0)
-               goto out_free_phba;
+/**
+ * lpfc_sli_driver_resource_setup - Setup driver internal resources for SLI3 dev.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * supporting the SLI-3 HBA device it is attached to.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_sli_driver_resource_setup(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli;
 
-       INIT_LIST_HEAD(&phba->port_list);
-       init_waitqueue_head(&phba->wait_4_mlo_m_q);
        /*
-        * Get all the module params for configuring this host and then
-        * establish the host.
+        * Initialize timers used by driver
         */
-       lpfc_get_cfgparam(phba);
-       phba->max_vpi = LPFC_MAX_VPI;
 
-       /* Initialize timers used by driver */
+       /* Heartbeat timer */
        init_timer(&phba->hb_tmofunc);
        phba->hb_tmofunc.function = lpfc_hb_timeout;
        phba->hb_tmofunc.data = (unsigned long)phba;
 
        psli = &phba->sli;
+       /* MBOX heartbeat timer */
        init_timer(&psli->mbox_tmo);
        psli->mbox_tmo.function = lpfc_mbox_timeout;
        psli->mbox_tmo.data = (unsigned long) phba;
+       /* FCP polling mode timer */
        init_timer(&phba->fcp_poll_timer);
        phba->fcp_poll_timer.function = lpfc_poll_timeout;
        phba->fcp_poll_timer.data = (unsigned long) phba;
+       /* Fabric block timer */
        init_timer(&phba->fabric_block_timer);
        phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
        phba->fabric_block_timer.data = (unsigned long) phba;
+       /* EA polling mode timer */
        init_timer(&phba->eratt_poll);
        phba->eratt_poll.function = lpfc_poll_eratt;
        phba->eratt_poll.data = (unsigned long) phba;
 
-       pci_set_master(pdev);
-       pci_save_state(pdev);
-       pci_try_set_mwi(pdev);
+       /* Host attention work mask setup */
+       phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
+       phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+
+       /* Get all the module params for configuring this host */
+       lpfc_get_cfgparam(phba);
+       /*
+        * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+        * used to create the sg_dma_buf_pool must be dynamically calculated.
+        * 2 segments are added since the IOCB needs a command and response bde.
+        */
+       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd) +
+               sizeof(struct fcp_rsp) +
+                       ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct ulp_bde64));
+
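+       /*
+        * Worked example (illustrative): per the SLI4 sizing comment later
+        * in this patch, fcp_cmnd is 32 bytes and fcp_rsp 160, and a
+        * ulp_bde64 is 12 bytes; with cfg_sg_seg_cnt = 64 this gives
+        * 32 + 160 + (64 + 2) * 12 = 984 bytes per pool buffer.
+        */
+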
+       if (phba->cfg_enable_bg) {
+               phba->cfg_sg_seg_cnt = LPFC_MAX_SG_SEG_CNT;
+               phba->cfg_sg_dma_buf_size +=
+                       phba->cfg_prot_sg_seg_cnt * sizeof(struct ulp_bde64);
+       }
 
-       if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(64)) != 0)
-               if (pci_set_dma_mask(phba->pcidev, DMA_BIT_MASK(32)) != 0)
-                       goto out_idr_remove;
+       /* Also reinitialize the host templates with new values. */
+       lpfc_vport_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+       lpfc_template.sg_tablesize = phba->cfg_sg_seg_cnt;
+
+       phba->max_vpi = LPFC_MAX_VPI;
+       /* This will be set to correct value after config_port mbox */
+       phba->max_vports = 0;
 
        /*
-        * Get the bus address of Bar0 and Bar2 and the number of bytes
-        * required by each mapping.
+        * Initialize the SLI Layer to run with lpfc HBAs.
         */
-       phba->pci_bar0_map = pci_resource_start(phba->pcidev, 0);
-       bar0map_len        = pci_resource_len(phba->pcidev, 0);
+       lpfc_sli_setup(phba);
+       lpfc_sli_queue_setup(phba);
 
-       phba->pci_bar2_map = pci_resource_start(phba->pcidev, 2);
-       bar2map_len        = pci_resource_len(phba->pcidev, 2);
+       /* Allocate device driver memory */
+       if (lpfc_mem_alloc(phba, BPL_ALIGN_SZ))
+               return -ENOMEM;
 
-       /* Map HBA SLIM to a kernel virtual address. */
-       phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
-       if (!phba->slim_memmap_p) {
-               error = -ENODEV;
-               dev_printk(KERN_ERR, &pdev->dev,
-                          "ioremap failed for SLIM memory.\n");
-               goto out_idr_remove;
-       }
+       return 0;
+}
 
-       /* Map HBA Control Registers to a kernel virtual address. */
-       phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
-       if (!phba->ctrl_regs_memmap_p) {
-               error = -ENODEV;
-               dev_printk(KERN_ERR, &pdev->dev,
-                          "ioremap failed for HBA control registers.\n");
-               goto out_iounmap_slim;
-       }
+/**
+ * lpfc_sli_driver_resource_unset - Unset drvr internal resources for SLI3 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specifically for supporting the SLI-3 HBA device it is attached to.
+ **/
+static void
+lpfc_sli_driver_resource_unset(struct lpfc_hba *phba)
+{
+       /* Free device driver memory allocated */
+       lpfc_mem_free_all(phba);
 
-       /* Allocate memory for SLI-2 structures */
-       phba->slim2p.virt = dma_alloc_coherent(&phba->pcidev->dev,
-                                              SLI2_SLIM_SIZE,
-                                              &phba->slim2p.phys,
-                                              GFP_KERNEL);
-       if (!phba->slim2p.virt)
-               goto out_iounmap;
+       return;
+}
 
-       memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
-       phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
-       phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
-       phba->IOCBs = (phba->slim2p.virt +
-                      offsetof(struct lpfc_sli2_slim, IOCBs));
+/**
+ * lpfc_sli4_driver_resource_setup - Setup drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources specific to
+ * supporting the SLI-4 HBA device it is attached to.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_sli4_driver_resource_setup(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli;
+       int rc;
+       int i, hbq_count;
 
-       phba->hbqslimp.virt = dma_alloc_coherent(&phba->pcidev->dev,
-                                                lpfc_sli_hbq_size(),
-                                                &phba->hbqslimp.phys,
-                                                GFP_KERNEL);
-       if (!phba->hbqslimp.virt)
-               goto out_free_slim;
+       /* Before proceeding, wait for POST done and device ready */
+       rc = lpfc_sli4_post_status_check(phba);
+       if (rc)
+               return -ENODEV;
+
+       /*
+        * Initialize timers used by driver
+        */
+
+       /* Heartbeat timer */
+       init_timer(&phba->hb_tmofunc);
+       phba->hb_tmofunc.function = lpfc_hb_timeout;
+       phba->hb_tmofunc.data = (unsigned long)phba;
+
+       psli = &phba->sli;
+       /* MBOX heartbeat timer */
+       init_timer(&psli->mbox_tmo);
+       psli->mbox_tmo.function = lpfc_mbox_timeout;
+       psli->mbox_tmo.data = (unsigned long) phba;
+       /* Fabric block timer */
+       init_timer(&phba->fabric_block_timer);
+       phba->fabric_block_timer.function = lpfc_fabric_block_timeout;
+       phba->fabric_block_timer.data = (unsigned long) phba;
+       /* EA polling mode timer */
+       init_timer(&phba->eratt_poll);
+       phba->eratt_poll.function = lpfc_poll_eratt;
+       phba->eratt_poll.data = (unsigned long) phba;
+       /*
+        * We need to do a READ_CONFIG mailbox command here before
+        * calling lpfc_get_cfgparam. For VFs this will report the
+        * MAX_XRI, MAX_VPI, MAX_RPI, MAX_IOCB, and MAX_VFI settings.
+        * All of the resources allocated
+        * for this Port are tied to these values.
+        */
+       /* Get all the module params for configuring this host */
+       lpfc_get_cfgparam(phba);
+       phba->max_vpi = LPFC_MAX_VPI;
+       /* This will be set to correct value after the read_config mbox */
+       phba->max_vports = 0;
+
+       /* Program the default value of vlan_id and fc_map */
+       phba->valid_vlan = 0;
+       phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+       phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+       phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+       /*
+        * Since the sg_tablesize is a module parameter, the sg_dma_buf_size
+        * used to create the sg_dma_buf_pool must be dynamically calculated.
+        * 2 segments are added since the IOCB needs a command and response bde.
+        * To ensure that the scsi sgl does not cross a 4k page boundary, only
+        * sgl sizes of 1k, 2k, 4k, and 8k are supported.
+        * Table of sgl sizes and seg_cnt:
+        * sgl size,    sg_seg_cnt      total seg
+        * 1k           50              52
+        * 2k           114             116
+        * 4k           242             244
+        * 8k           498             500
+        * cmd(32) + rsp(160) + (52 * sizeof(sli4_sge)) = 1024
+        * cmd(32) + rsp(160) + (116 * sizeof(sli4_sge)) = 2048
+        * cmd(32) + rsp(160) + (244 * sizeof(sli4_sge)) = 4096
+        * cmd(32) + rsp(160) + (500 * sizeof(sli4_sge)) = 8192
+        */
+       if (phba->cfg_sg_seg_cnt <= LPFC_DEFAULT_SG_SEG_CNT)
+               phba->cfg_sg_seg_cnt = 50;
+       else if (phba->cfg_sg_seg_cnt <= 114)
+               phba->cfg_sg_seg_cnt = 114;
+       else if (phba->cfg_sg_seg_cnt <= 242)
+               phba->cfg_sg_seg_cnt = 242;
+       else
+               phba->cfg_sg_seg_cnt = 498;
 
+       phba->cfg_sg_dma_buf_size = sizeof(struct fcp_cmnd)
+                                       + sizeof(struct fcp_rsp);
+       phba->cfg_sg_dma_buf_size +=
+               ((phba->cfg_sg_seg_cnt + 2) * sizeof(struct sli4_sge));
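+       /* e.g. (illustrative): a request for 200 segments rounds up to the
+        * 242 bucket, giving 32 + 160 + (242 + 2) * 16 = 4096 bytes, exactly
+        * one 4k page, matching the table above.
+        */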
+
+       /* Initialize buffer queue management fields */
        hbq_count = lpfc_sli_hbq_count();
-       ptr = phba->hbqslimp.virt;
-       for (i = 0; i < hbq_count; ++i) {
-               phba->hbqs[i].hbq_virt = ptr;
+       for (i = 0; i < hbq_count; ++i)
                INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
-               ptr += (lpfc_hbq_defs[i]->entry_count *
-                       sizeof(struct lpfc_hbq_entry));
-       }
-       phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
-       phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer  = lpfc_els_hbq_free;
+       INIT_LIST_HEAD(&phba->rb_pend_list);
+       phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_sli4_rb_alloc;
+       phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_sli4_rb_free;
 
-       memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+       /*
+        * Initialize the SLI Layer to run with lpfc SLI4 HBAs.
+        */
+       /* Initialize the Abort scsi buffer list used by driver */
+       spin_lock_init(&phba->sli4_hba.abts_scsi_buf_list_lock);
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+       /* This abort list is used by the worker thread */
+       spin_lock_init(&phba->sli4_hba.abts_sgl_list_lock);
 
-       INIT_LIST_HEAD(&phba->hbqbuf_in_list);
+       /*
+        * Initialize driver internal slow-path work queues
+        */
 
-       /* Initialize the SLI Layer to run with lpfc HBAs. */
+       /* Driver internal slow-path CQ Event pool */
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_cqe_event_pool);
+       /* Response IOCB work queue list */
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_rspiocb_work_queue);
+       /* Asynchronous event CQ Event work queue list */
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_asynce_work_queue);
+       /* Fast-path XRI aborted CQ Event work queue list */
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+       /* Slow-path XRI aborted CQ Event work queue list */
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_els_xri_aborted_work_queue);
+       /* Receive queue CQ Event work queue list */
+       INIT_LIST_HEAD(&phba->sli4_hba.sp_unsol_work_queue);
+
+       /* Initialize the driver internal SLI layer lists. */
        lpfc_sli_setup(phba);
        lpfc_sli_queue_setup(phba);
 
-       retval = lpfc_mem_alloc(phba);
-       if (retval) {
-               error = retval;
-               goto out_free_hbqslimp;
-       }
+       /* Allocate device driver memory */
+       rc = lpfc_mem_alloc(phba, SGL_ALIGN_SZ);
+       if (rc)
+               return -ENOMEM;
 
-       /* Initialize and populate the iocb list per host.  */
-       INIT_LIST_HEAD(&phba->lpfc_iocb_list);
-       for (i = 0; i < LPFC_IOCB_LIST_CNT; i++) {
-               iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
-               if (iocbq_entry == NULL) {
-                       printk(KERN_ERR "%s: only allocated %d iocbs of "
-                               "expected %d count. Unloading driver.\n",
-                               __func__, i, LPFC_IOCB_LIST_CNT);
-                       error = -ENOMEM;
-                       goto out_free_iocbq;
-               }
+       /* Create the bootstrap mailbox command */
+       rc = lpfc_create_bootstrap_mbox(phba);
+       if (unlikely(rc))
+               goto out_free_mem;
 
-               iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
-               if (iotag == 0) {
-                       kfree (iocbq_entry);
-                       printk(KERN_ERR "%s: failed to allocate IOTAG. "
-                              "Unloading driver.\n",
-                               __func__);
-                       error = -ENOMEM;
-                       goto out_free_iocbq;
-               }
+       /* Set up the host's endian order with the device. */
+       rc = lpfc_setup_endian_order(phba);
+       if (unlikely(rc))
+               goto out_free_bsmbx;
 
-               spin_lock_irq(&phba->hbalock);
-               list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
-               phba->total_iocbq_bufs++;
-               spin_unlock_irq(&phba->hbalock);
-       }
+       /* Set up the hba's configuration parameters. */
+       rc = lpfc_sli4_read_config(phba);
+       if (unlikely(rc))
+               goto out_free_bsmbx;
 
-       /* Initialize HBA structure */
-       phba->fc_edtov = FF_DEF_EDTOV;
-       phba->fc_ratov = FF_DEF_RATOV;
-       phba->fc_altov = FF_DEF_ALTOV;
-       phba->fc_arbtov = FF_DEF_ARBTOV;
+       /* Perform a function reset */
+       rc = lpfc_pci_function_reset(phba);
+       if (unlikely(rc))
+               goto out_free_bsmbx;
 
-       INIT_LIST_HEAD(&phba->work_list);
-       phba->work_ha_mask = (HA_ERATT | HA_MBATT | HA_LATT);
-       phba->work_ha_mask |= (HA_RXMASK << (LPFC_ELS_RING * 4));
+       /* Create all the SLI4 queues */
+       rc = lpfc_sli4_queue_create(phba);
+       if (rc)
+               goto out_free_bsmbx;
 
-       /* Initialize the wait queue head for the kernel thread */
-       init_waitqueue_head(&phba->work_waitq);
+       /* Create driver internal CQE event pool */
+       rc = lpfc_sli4_cq_event_pool_create(phba);
+       if (rc)
+               goto out_destroy_queue;
 
-       /* Startup the kernel thread for this host adapter. */
-       phba->worker_thread = kthread_run(lpfc_do_work, phba,
-                                      "lpfc_worker_%d", phba->brd_no);
-       if (IS_ERR(phba->worker_thread)) {
-               error = PTR_ERR(phba->worker_thread);
-               goto out_free_iocbq;
+       /* Initialize and populate the sgl list per host */
+       rc = lpfc_init_sgl_list(phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1400 Failed to initialize sgl list.\n");
+               goto out_destroy_cq_event_pool;
+       }
+       rc = lpfc_init_active_sgl_array(phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1430 Failed to initialize active sgl array.\n");
+               goto out_free_sgl_list;
        }
 
-       /* Initialize the list of scsi buffers used by driver for scsi IO. */
-       spin_lock_init(&phba->scsi_buf_list_lock);
-       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+       rc = lpfc_sli4_init_rpi_hdrs(phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1432 Failed to initialize rpi headers.\n");
+               goto out_free_active_sgl;
+       }
 
-       /* Initialize list of fabric iocbs */
-       INIT_LIST_HEAD(&phba->fabric_iocb_list);
+       phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
+                                   phba->cfg_fcp_eq_count), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_eq_hdl) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2572 Failed to allocate memory for fast-path "
+                               "per-EQ handle array\n");
+               goto out_remove_rpi_hdrs;
+       }
 
-       /* Initialize list to save ELS buffers */
-       INIT_LIST_HEAD(&phba->elsbuf);
+       phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
+                                     phba->sli4_hba.cfg_eqn), GFP_KERNEL);
+       if (!phba->sli4_hba.msix_entries) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2573 Failed to allocate memory for msi-x "
+                               "interrupt vector entries\n");
+               goto out_free_fcp_eq_hdl;
+       }
 
-       vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
-       if (!vport)
-               goto out_kthread_stop;
+       return rc;
 
-       shost = lpfc_shost_from_vport(vport);
-       phba->pport = vport;
-       lpfc_debugfs_initialize(vport);
+out_free_fcp_eq_hdl:
+       kfree(phba->sli4_hba.fcp_eq_hdl);
+out_remove_rpi_hdrs:
+       lpfc_sli4_remove_rpi_hdrs(phba);
+out_free_active_sgl:
+       lpfc_free_active_sgl(phba);
+out_free_sgl_list:
+       lpfc_free_sgl_list(phba);
+out_destroy_cq_event_pool:
+       lpfc_sli4_cq_event_pool_destroy(phba);
+out_destroy_queue:
+       lpfc_sli4_queue_destroy(phba);
+out_free_bsmbx:
+       lpfc_destroy_bootstrap_mbox(phba);
+out_free_mem:
+       lpfc_mem_free(phba);
+       return rc;
+}
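
The error ladder above releases resources in exactly the reverse order of
acquisition, the standard kernel unwind idiom. A stripped-down fragment of
the shape (acquire_*/release_* are hypothetical helpers, illustrative
only):

	a = acquire_a();
	if (!a)
		goto out;
	b = acquire_b();
	if (!b)
		goto out_release_a;
	c = acquire_c();
	if (!c)
		goto out_release_b;
	return 0;		/* success: a, b and c are all held */

out_release_b:
	release_b(b);
out_release_a:
	release_a(a);
out:
	return -ENOMEM;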
 
-       pci_set_drvdata(pdev, shost);
+/**
+ * lpfc_sli4_driver_resource_unset - Unset drvr internal resources for SLI4 dev
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up
+ * specifically for supporting the SLI-4 HBA device it is attached to.
+ **/
+static void
+lpfc_sli4_driver_resource_unset(struct lpfc_hba *phba)
+{
+       struct lpfc_fcf_conn_entry *conn_entry, *next_conn_entry;
 
-       phba->MBslimaddr = phba->slim_memmap_p;
-       phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
-       phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
-       phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
-       phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+       /* unregister default FCFI from the HBA */
+       lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
 
-       /* Configure sysfs attributes */
-       if (lpfc_alloc_sysfs_attr(vport)) {
-               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "1476 Failed to allocate sysfs attr\n");
-               error = -ENOMEM;
-               goto out_destroy_port;
-       }
+       /* Free the default FCF table */
+       lpfc_sli_remove_dflt_fcf(phba);
 
-       cfg_mode = phba->cfg_use_msi;
-       while (true) {
-               /* Configure and enable interrupt */
-               intr_mode = lpfc_enable_intr(phba, cfg_mode);
-               if (intr_mode == LPFC_INTR_ERROR) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "0426 Failed to enable interrupt.\n");
-                       goto out_free_sysfs_attr;
-               }
-               /* HBA SLI setup */
-               if (lpfc_sli_hba_setup(phba)) {
-                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                       "1477 Failed to set up hba\n");
-                       error = -ENODEV;
-                       goto out_remove_device;
-               }
+       /* Free memory allocated for msi-x interrupt vector entries */
+       kfree(phba->sli4_hba.msix_entries);
 
-               /* Wait 50ms for the interrupts of previous mailbox commands */
-               msleep(50);
-               /* Check active interrupts received */
-               if (phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
-                       /* Log the current active interrupt mode */
-                       phba->intr_mode = intr_mode;
-                       lpfc_log_intr_mode(phba, intr_mode);
-                       break;
-               } else {
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "0451 Configure interrupt mode (%d) "
-                                       "failed active interrupt test.\n",
-                                       intr_mode);
-                       if (intr_mode == 0) {
-                               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                                               "0479 Failed to enable "
-                                               "interrupt.\n");
-                               error = -ENODEV;
-                               goto out_remove_device;
-                       }
-                       /* Stop HBA SLI setups */
-                       lpfc_stop_port(phba);
-                       /* Disable the current interrupt mode */
-                       lpfc_disable_intr(phba);
-                       /* Try next level of interrupt mode */
-                       cfg_mode = --intr_mode;
-               }
+       /* Free memory allocated for fast-path work queue handles */
+       kfree(phba->sli4_hba.fcp_eq_hdl);
+
+       /* Free the allocated rpi headers. */
+       lpfc_sli4_remove_rpi_hdrs(phba);
+
+       /* Free the ELS sgl list */
+       lpfc_free_active_sgl(phba);
+       lpfc_free_sgl_list(phba);
+
+       /* Free the SCSI sgl management array */
+       kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+
+       /* Free the SLI4 queues */
+       lpfc_sli4_queue_destroy(phba);
+
+       /* Free the completion queue EQ event pool */
+       lpfc_sli4_cq_event_release_all(phba);
+       lpfc_sli4_cq_event_pool_destroy(phba);
+
+       /* Reset SLI4 HBA FCoE function */
+       lpfc_pci_function_reset(phba);
+
+       /* Free the bsmbx region. */
+       lpfc_destroy_bootstrap_mbox(phba);
+
+       /* Free the SLI Layer memory with SLI4 HBAs */
+       lpfc_mem_free_all(phba);
+
+       /* Free the current connect table */
+       list_for_each_entry_safe(conn_entry, next_conn_entry,
+               &phba->fcf_conn_rec_list, list)
+               kfree(conn_entry);
+
+       return;
+}
+
+/**
+ * lpfc_init_api_table_setup - Set up init api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the device INIT interface API function jump table
+ * in @phba struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_init_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+       switch (dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               phba->lpfc_hba_down_post = lpfc_hba_down_post_s3;
+               phba->lpfc_handle_eratt = lpfc_handle_eratt_s3;
+               phba->lpfc_stop_port = lpfc_stop_port_s3;
+               break;
+       case LPFC_PCI_DEV_OC:
+               phba->lpfc_hba_down_post = lpfc_hba_down_post_s4;
+               phba->lpfc_handle_eratt = lpfc_handle_eratt_s4;
+               phba->lpfc_stop_port = lpfc_stop_port_s4;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1431 Invalid HBA PCI-device group: 0x%x\n",
+                               dev_grp);
+               return -ENODEV;
        }
+       return 0;
+}
 
+/**
+ * lpfc_setup_driver_resource_phase1 - Phase1 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources before the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase1(struct lpfc_hba *phba)
+{
        /*
-        * hba setup may have changed the hba_queue_depth so we need to adjust
-        * the value of can_queue.
+        * Driver resources common to all SLI revisions
         */
-       shost->can_queue = phba->cfg_hba_queue_depth - 10;
-       if (phba->sli3_options & LPFC_SLI3_BG_ENABLED) {
+       atomic_set(&phba->fast_event_count, 0);
+       spin_lock_init(&phba->hbalock);
 
-               if (lpfc_prot_mask && lpfc_prot_guard) {
-                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                                       "1478 Registering BlockGuard with the "
-                                       "SCSI layer\n");
+       /* Initialize ndlp management spinlock */
+       spin_lock_init(&phba->ndlp_lock);
 
-                       scsi_host_set_prot(shost, lpfc_prot_mask);
-                       scsi_host_set_guard(shost, lpfc_prot_guard);
-               }
+       INIT_LIST_HEAD(&phba->port_list);
+       INIT_LIST_HEAD(&phba->work_list);
+       init_waitqueue_head(&phba->wait_4_mlo_m_q);
+
+       /* Initialize the wait queue head for the kernel thread */
+       init_waitqueue_head(&phba->work_waitq);
+
+       /* Initialize the scsi buffer list used by driver for scsi IO */
+       spin_lock_init(&phba->scsi_buf_list_lock);
+       INIT_LIST_HEAD(&phba->lpfc_scsi_buf_list);
+
+       /* Initialize the fabric iocb list */
+       INIT_LIST_HEAD(&phba->fabric_iocb_list);
+
+       /* Initialize list to save ELS buffers */
+       INIT_LIST_HEAD(&phba->elsbuf);
+
+       /* Initialize FCF connection rec list */
+       INIT_LIST_HEAD(&phba->fcf_conn_rec_list);
+
+       return 0;
+}
+
+/**
+ * lpfc_setup_driver_resource_phase2 - Phase2 setup driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the driver internal resources after the
+ * device specific resource setup to support the HBA device it is attached to.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_setup_driver_resource_phase2(struct lpfc_hba *phba)
+{
+       int error;
+
+       /* Startup the kernel thread for this host adapter. */
+       phba->worker_thread = kthread_run(lpfc_do_work, phba,
+                                         "lpfc_worker_%d", phba->brd_no);
+       if (IS_ERR(phba->worker_thread)) {
+               error = PTR_ERR(phba->worker_thread);
+               return error;
        }
 
-       if (!_dump_buf_data) {
-               int pagecnt = 10;
-               while (pagecnt) {
-                       spin_lock_init(&_dump_buf_lock);
-                       _dump_buf_data =
-                               (char *) __get_free_pages(GFP_KERNEL, pagecnt);
-                       if (_dump_buf_data) {
-                               printk(KERN_ERR "BLKGRD allocated %d pages for "
-                                               "_dump_buf_data at 0x%p\n",
-                                               (1 << pagecnt), _dump_buf_data);
-                               _dump_buf_data_order = pagecnt;
-                               memset(_dump_buf_data, 0, ((1 << PAGE_SHIFT)
-                                                          << pagecnt));
-                               break;
-                       } else {
-                               --pagecnt;
-                       }
+       return 0;
+}
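The worker-thread startup above leans on the kernel's error-pointer convention: kthread_run() does not return NULL on failure, it returns a pointer that encodes a negative errno, which IS_ERR()/PTR_ERR() decode. A minimal userspace sketch of that convention (MAX_ERRNO mirrors the kernel's value; this is an illustration, not the kernel implementation):

/* Error-pointer encoding: errors live in the top 4095 values of the
 * address space, so one pointer can carry either a valid address or a
 * negative errno. */
#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *thread = ERR_PTR(-ENOMEM);	/* simulate kthread_run() failing */

	if (IS_ERR(thread))
		printf("kthread_run failed: error %ld\n", PTR_ERR(thread));
	return 0;
}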
 
-               }
+/**
+ * lpfc_unset_driver_resource_phase2 - Phase2 unset driver internal resources.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the driver internal resources set up after
+ * the device-specific resource setup for supporting the HBA device it is
+ * attached to.
+ **/
+static void
+lpfc_unset_driver_resource_phase2(struct lpfc_hba *phba)
+{
+       /* Stop kernel worker thread */
+       kthread_stop(phba->worker_thread);
+}
 
-               if (!_dump_buf_data_order)
-                       printk(KERN_ERR "BLKGRD ERROR unable to allocate "
-                                       "memory for hexdump\n");
+/**
+ * lpfc_free_iocb_list - Free iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's IOCB list and memory.
+ **/
+static void
+lpfc_free_iocb_list(struct lpfc_hba *phba)
+{
+       struct lpfc_iocbq *iocbq_entry = NULL, *iocbq_next = NULL;
 
-       } else {
-               printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
-                      "\n", _dump_buf_data);
+       spin_lock_irq(&phba->hbalock);
+       list_for_each_entry_safe(iocbq_entry, iocbq_next,
+                                &phba->lpfc_iocb_list, list) {
+               list_del(&iocbq_entry->list);
+               kfree(iocbq_entry);
+               phba->total_iocbq_bufs--;
        }
+       spin_unlock_irq(&phba->hbalock);
 
+       return;
+}
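lpfc_free_iocb_list() walks the list with list_for_each_entry_safe(), which caches each entry's successor before the entry is unlinked and freed, so deleting the current node cannot corrupt the traversal. A self-contained sketch of the same safe-deletion pattern, using a plain singly linked list instead of the kernel's list.h:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int val;
	struct node *next;
};

/* Free every node: cache the successor before freeing the current
 * node, exactly the property list_for_each_entry_safe() provides. */
static void free_all(struct node **head)
{
	struct node *cur = *head, *next;

	while (cur) {
		next = cur->next;	/* save successor first */
		free(cur);
		cur = next;
	}
	*head = NULL;
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->val = i;
		n->next = head;
		head = n;
	}
	free_all(&head);
	printf("list drained, head=%p\n", (void *)head);
	return 0;
}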
 
-       if (!_dump_buf_dif) {
-               int pagecnt = 10;
-               while (pagecnt) {
-                       _dump_buf_dif =
-                               (char *) __get_free_pages(GFP_KERNEL, pagecnt);
-                       if (_dump_buf_dif) {
-                               printk(KERN_ERR "BLKGRD allocated %d pages for "
-                                               "_dump_buf_dif at 0x%p\n",
-                                               (1 << pagecnt), _dump_buf_dif);
-                               _dump_buf_dif_order = pagecnt;
-                               memset(_dump_buf_dif, 0, ((1 << PAGE_SHIFT)
-                                                         << pagecnt));
-                               break;
-                       } else {
-                               --pagecnt;
-                       }
+/**
+ * lpfc_init_iocb_list - Allocate and initialize iocb list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's IOCB
+ * list and set up the IOCB tag array accordingly.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_init_iocb_list(struct lpfc_hba *phba, int iocb_count)
+{
+       struct lpfc_iocbq *iocbq_entry = NULL;
+       uint16_t iotag;
+       int i;
 
+       /* Initialize and populate the iocb list per host.  */
+       INIT_LIST_HEAD(&phba->lpfc_iocb_list);
+       for (i = 0; i < iocb_count; i++) {
+               iocbq_entry = kzalloc(sizeof(struct lpfc_iocbq), GFP_KERNEL);
+               if (iocbq_entry == NULL) {
+                       printk(KERN_ERR "%s: only allocated %d iocbs of "
+                               "expected %d count. Unloading driver.\n",
+                               __func__, i, iocb_count);
+                       goto out_free_iocbq;
                }
 
-               if (!_dump_buf_dif_order)
-                       printk(KERN_ERR "BLKGRD ERROR unable to allocate "
-                                       "memory for hexdump\n");
+               iotag = lpfc_sli_next_iotag(phba, iocbq_entry);
+               if (iotag == 0) {
+                       kfree(iocbq_entry);
+                       printk(KERN_ERR "%s: failed to allocate IOTAG. "
+                               "Unloading driver.\n", __func__);
+                       goto out_free_iocbq;
+               }
+               iocbq_entry->sli4_xritag = NO_XRI;
 
-       } else {
-               printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
-                               _dump_buf_dif);
+               spin_lock_irq(&phba->hbalock);
+               list_add(&iocbq_entry->list, &phba->lpfc_iocb_list);
+               phba->total_iocbq_bufs++;
+               spin_unlock_irq(&phba->hbalock);
        }
 
-       lpfc_host_attrib_init(shost);
+       return 0;
 
-       if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
-               spin_lock_irq(shost->host_lock);
-               lpfc_poll_start_timer(phba);
-               spin_unlock_irq(shost->host_lock);
+out_free_iocbq:
+       lpfc_free_iocb_list(phba);
+
+       return -ENOMEM;
+}
+
+/**
+ * lpfc_free_sgl_list - Free sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver's sgl list and memory.
+ **/
+static void
+lpfc_free_sgl_list(struct lpfc_hba *phba)
+{
+       struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
+       LIST_HEAD(sglq_list);
+       int rc = 0;
+
+       spin_lock_irq(&phba->hbalock);
+       list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+       spin_unlock_irq(&phba->hbalock);
+
+       list_for_each_entry_safe(sglq_entry, sglq_next,
+                                &sglq_list, list) {
+               list_del(&sglq_entry->list);
+               lpfc_mbuf_free(phba, sglq_entry->virt, sglq_entry->phys);
+               kfree(sglq_entry);
+               phba->sli4_hba.total_sglq_bufs--;
+       }
+       rc = lpfc_sli4_remove_all_sgl_pages(phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "2005 Unable to deregister pages from HBA: %x", rc);
        }
+       kfree(phba->sli4_hba.lpfc_els_sgl_array);
+}
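Note the locking shape here, which differs from lpfc_free_iocb_list() above: the shared list is spliced onto a private head while hbalock is held, and the per-entry teardown then runs with the lock already dropped. A standalone sketch of that splice-under-lock idiom, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct entry {
	struct entry *next;
};

static struct entry *shared_list;
static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

static void drain_shared_list(void)
{
	struct entry *local, *next;

	pthread_mutex_lock(&list_lock);
	local = shared_list;		/* splice: steal the whole list */
	shared_list = NULL;
	pthread_mutex_unlock(&list_lock);

	for (; local; local = next) {	/* slow teardown, lock dropped */
		next = local->next;
		free(local);
	}
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++) {
		struct entry *e = malloc(sizeof(*e));

		if (!e)
			break;
		e->next = shared_list;
		shared_list = e;
	}
	drain_shared_list();
	printf("shared list now %p\n", (void *)shared_list);
	return 0;
}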
 
-       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "0428 Perform SCSI scan\n");
-       /* Send board arrival event to upper layer */
-       adapter_event.event_type = FC_REG_ADAPTER_EVENT;
-       adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
-       fc_host_post_vendor_event(shost, fc_get_event_number(),
-               sizeof(adapter_event),
-               (char *) &adapter_event,
-               LPFC_NL_VENDOR_ID);
+/**
+ * lpfc_init_active_sgl_array - Allocate the buf to track active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate the driver's active sgl memory.
+ * This array will hold the sglq_entry's for active IOs.
+ **/
+static int
+lpfc_init_active_sgl_array(struct lpfc_hba *phba)
+{
+       int size;
+       size = sizeof(struct lpfc_sglq *);
+       size *= phba->sli4_hba.max_cfg_param.max_xri;
 
+       phba->sli4_hba.lpfc_sglq_active_list =
+               kzalloc(size, GFP_KERNEL);
+       if (!phba->sli4_hba.lpfc_sglq_active_list)
+               return -ENOMEM;
        return 0;
+}
+
+/**
+ * lpfc_free_active_sgl - Free the buf that tracks active ELS XRIs.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to walk through the array of active sglq entries
+ * and free all of the resources.
+ * This is just a placeholder for now.
+ **/
+static void
+lpfc_free_active_sgl(struct lpfc_hba *phba)
+{
+       kfree(phba->sli4_hba.lpfc_sglq_active_list);
+}
+
+/**
+ * lpfc_init_sgl_list - Allocate and initialize sgl list.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and initialize the driver's sgl
+ * list and set up the sgl xritag tag array accordingly.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_init_sgl_list(struct lpfc_hba *phba)
+{
+       struct lpfc_sglq *sglq_entry = NULL;
+       int i;
+       int els_xri_cnt;
+
+       els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                               "2400 lpfc_init_sgl_list els %d.\n",
+                               els_xri_cnt);
+       /* Initialize and populate the sglq list per host/VF. */
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_sgl_list);
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+       /* Sanity check on XRI management */
+       if (phba->sli4_hba.max_cfg_param.max_xri <= els_xri_cnt) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2562 No room left for SCSI XRI allocation: "
+                               "max_xri=%d, els_xri=%d\n",
+                               phba->sli4_hba.max_cfg_param.max_xri,
+                               els_xri_cnt);
+               return -ENOMEM;
+       }
+
+       /* Allocate memory for the ELS XRI management array */
+       phba->sli4_hba.lpfc_els_sgl_array =
+                       kzalloc((sizeof(struct lpfc_sglq *) * els_xri_cnt),
+                       GFP_KERNEL);
+
+       if (!phba->sli4_hba.lpfc_els_sgl_array) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2401 Failed to allocate memory for ELS "
+                               "XRI management array of size %d.\n",
+                               els_xri_cnt);
+               return -ENOMEM;
+       }
+
+       /* Keep the SCSI XRI into the XRI management array */
+       phba->sli4_hba.scsi_xri_max =
+                       phba->sli4_hba.max_cfg_param.max_xri - els_xri_cnt;
+       phba->sli4_hba.scsi_xri_cnt = 0;
+
+       phba->sli4_hba.lpfc_scsi_psb_array =
+                       kzalloc((sizeof(struct lpfc_scsi_buf *) *
+                       phba->sli4_hba.scsi_xri_max), GFP_KERNEL);
+
+       if (!phba->sli4_hba.lpfc_scsi_psb_array) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2563 Failed to allocate memory for SCSI "
+                               "XRI management array of size %d.\n",
+                               phba->sli4_hba.scsi_xri_max);
+               kfree(phba->sli4_hba.lpfc_els_sgl_array);
+               return -ENOMEM;
+       }
+
+       for (i = 0; i < els_xri_cnt; i++) {
+               sglq_entry = kzalloc(sizeof(struct lpfc_sglq), GFP_KERNEL);
+               if (sglq_entry == NULL) {
+                       printk(KERN_ERR "%s: only allocated %d sgls of "
+                               "expected %d count. Unloading driver.\n",
+                               __func__, i, els_xri_cnt);
+                       goto out_free_mem;
+               }
+
+               sglq_entry->sli4_xritag = lpfc_sli4_next_xritag(phba);
+               if (sglq_entry->sli4_xritag == NO_XRI) {
+                       kfree(sglq_entry);
+                       printk(KERN_ERR "%s: failed to allocate XRI.\n"
+                               "Unloading driver.\n", __func__);
+                       goto out_free_mem;
+               }
+               sglq_entry->buff_type = GEN_BUFF_TYPE;
+               sglq_entry->virt = lpfc_mbuf_alloc(phba, 0, &sglq_entry->phys);
+               if (sglq_entry->virt == NULL) {
+                       kfree(sglq_entry);
+                       printk(KERN_ERR "%s: failed to allocate mbuf.\n"
+                               "Unloading driver.\n", __func__);
+                       goto out_free_mem;
+               }
+               sglq_entry->sgl = sglq_entry->virt;
+               memset(sglq_entry->sgl, 0, LPFC_BPL_SIZE);
+
+               /* The list order is used by later block SGL registration */
+               spin_lock_irq(&phba->hbalock);
+               list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
+               phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
+               phba->sli4_hba.total_sglq_bufs++;
+               spin_unlock_irq(&phba->hbalock);
+       }
+       return 0;
+
+out_free_mem:
+       kfree(phba->sli4_hba.lpfc_scsi_psb_array);
+       lpfc_free_sgl_list(phba);
+       return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_init_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec.  This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.
+ * No locks are held here because this is an initialization routine
+ * called only from probe or lpfc_online when interrupts are not
+ * enabled and the driver is reinitializing the device.
+ *
+ * Return codes
+ *     0 - successful
+ *     ENOMEM - No available memory
+ *     EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *phba)
+{
+       int rc = 0;
+       int longs;
+       uint16_t rpi_count;
+       struct lpfc_rpi_hdr *rpi_hdr;
+
+       INIT_LIST_HEAD(&phba->sli4_hba.lpfc_rpi_hdr_list);
+
+       /*
+        * Provision an rpi bitmask range for discovery. The total count
+        * is the sum of the base and the max count, minus one.
+        */
+       rpi_count = phba->sli4_hba.max_cfg_param.rpi_base +
+                   phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+       longs = ((rpi_count) + BITS_PER_LONG - 1) / BITS_PER_LONG;
+       phba->sli4_hba.rpi_bmask = kzalloc(longs * sizeof(unsigned long),
+                                          GFP_KERNEL);
+       if (!phba->sli4_hba.rpi_bmask)
+               return -ENOMEM;
+
+       rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+       if (!rpi_hdr) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0391 Error during rpi post operation\n");
+               lpfc_sli4_remove_rpis(phba);
+               rc = -ENODEV;
+       }
+
+       return rc;
+}
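The bitmask sizing above is the usual round-up division: the number of unsigned longs needed to track rpi_count bits is (rpi_count + BITS_PER_LONG - 1) / BITS_PER_LONG. A small standalone check of the arithmetic:

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))

int main(void)
{
	unsigned int counts[] = { 1, 64, 65, 4096 };
	size_t i;

	for (i = 0; i < sizeof(counts) / sizeof(counts[0]); i++) {
		unsigned int rpi_count = counts[i];
		/* round up so a partial final word still gets storage */
		size_t longs = (rpi_count + BITS_PER_LONG - 1) / BITS_PER_LONG;
		unsigned long *bmask = calloc(longs, sizeof(unsigned long));

		printf("%4u rpis -> %zu longs (%zu bytes)\n",
		       rpi_count, longs, longs * sizeof(unsigned long));
		free(bmask);
	}
	return 0;
}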
+
+/**
+ * lpfc_sli4_create_rpi_hdr - Allocate an rpi header memory region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate a single 4KB memory region to
+ * support rpis and stores them in the phba.  This single region
+ * provides support for up to 64 rpis.  The region is used globally
+ * by the device.
+ *
+ * Returns:
+ *   A valid rpi hdr on success.
+ *   A NULL pointer on any failure.
+ **/
+struct lpfc_rpi_hdr *
+lpfc_sli4_create_rpi_hdr(struct lpfc_hba *phba)
+{
+       uint16_t rpi_limit, curr_rpi_range;
+       struct lpfc_dmabuf *dmabuf;
+       struct lpfc_rpi_hdr *rpi_hdr;
+
+       rpi_limit = phba->sli4_hba.max_cfg_param.rpi_base +
+                   phba->sli4_hba.max_cfg_param.max_rpi - 1;
+
+       spin_lock_irq(&phba->hbalock);
+       curr_rpi_range = phba->sli4_hba.next_rpi;
+       spin_unlock_irq(&phba->hbalock);
+
+       /*
+        * The port has a limited number of rpis. The increment here
+        * is LPFC_RPI_HDR_COUNT - 1 to account for the starting value
+        * and to allow the full max_rpi range per port.
+        */
+       if ((curr_rpi_range + (LPFC_RPI_HDR_COUNT - 1)) > rpi_limit)
+               return NULL;
+
+       /*
+        * First allocate the protocol header region for the port.  The
+        * port expects a 4KB DMA-mapped memory region that is 4K aligned.
+        */
+       dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!dmabuf)
+               return NULL;
+
+       dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                         LPFC_HDR_TEMPLATE_SIZE,
+                                         &dmabuf->phys,
+                                         GFP_KERNEL);
+       if (!dmabuf->virt) {
+               rpi_hdr = NULL;
+               goto err_free_dmabuf;
+       }
+
+       memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
+       if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
+               rpi_hdr = NULL;
+               goto err_free_coherent;
+       }
+
+       /* Save the rpi header data for cleanup later. */
+       rpi_hdr = kzalloc(sizeof(struct lpfc_rpi_hdr), GFP_KERNEL);
+       if (!rpi_hdr)
+               goto err_free_coherent;
+
+       rpi_hdr->dmabuf = dmabuf;
+       rpi_hdr->len = LPFC_HDR_TEMPLATE_SIZE;
+       rpi_hdr->page_count = 1;
+       spin_lock_irq(&phba->hbalock);
+       rpi_hdr->start_rpi = phba->sli4_hba.next_rpi;
+       list_add_tail(&rpi_hdr->list, &phba->sli4_hba.lpfc_rpi_hdr_list);
+
+       /*
+        * The next_rpi stores the next multiple-of-64 rpi value to post
+        * in any subsequent rpi memory region postings.
+        */
+       phba->sli4_hba.next_rpi += LPFC_RPI_HDR_COUNT;
+       spin_unlock_irq(&phba->hbalock);
+       return rpi_hdr;
+
+ err_free_coherent:
+       dma_free_coherent(&phba->pcidev->dev, LPFC_HDR_TEMPLATE_SIZE,
+                         dmabuf->virt, dmabuf->phys);
+ err_free_dmabuf:
+       kfree(dmabuf);
+       return NULL;
+}
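The IS_ALIGNED() test above rejects a DMA buffer whose bus address is not aligned to LPFC_HDR_TEMPLATE_SIZE. For a power-of-two alignment such as 4KB, the test reduces to masking the low bits, as this sketch shows:

#include <stdio.h>
#include <stdint.h>

/* Valid only when align is a power of two, as 4096 is. */
static int is_aligned(uint64_t addr, uint64_t align)
{
	return (addr & (align - 1)) == 0;
}

int main(void)
{
	printf("0x1000 -> %d\n", is_aligned(0x1000, 4096));	/* 1: aligned */
	printf("0x1010 -> %d\n", is_aligned(0x1010, 4096));	/* 0: not    */
	return 0;
}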
+
+/**
+ * lpfc_sli4_remove_rpi_hdrs - Remove all rpi header memory regions
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to remove all memory resources allocated
+ * to support rpis. This routine presumes the caller has released all
+ * rpis consumed by fabric or port logins and is prepared to have
+ * the header pages removed.
+ **/
+void
+lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *phba)
+{
+       struct lpfc_rpi_hdr *rpi_hdr, *next_rpi_hdr;
+
+       list_for_each_entry_safe(rpi_hdr, next_rpi_hdr,
+                                &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+               list_del(&rpi_hdr->list);
+               dma_free_coherent(&phba->pcidev->dev, rpi_hdr->len,
+                                 rpi_hdr->dmabuf->virt, rpi_hdr->dmabuf->phys);
+               kfree(rpi_hdr->dmabuf);
+               kfree(rpi_hdr);
+       }
+
+       phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+       memset(phba->sli4_hba.rpi_bmask, 0, sizeof(*phba->sli4_hba.rpi_bmask));
+}
+
+/**
+ * lpfc_hba_alloc - Allocate driver hba data structure for a device.
+ * @pdev: pointer to pci device data structure.
+ *
+ * This routine is invoked to allocate the driver hba data structure for an
+ * HBA device. If the allocation is successful, the phba reference to the
+ * PCI device data structure is set.
+ *
+ * Return codes
+ *      pointer to @phba - successful
+ *      NULL - error
+ **/
+static struct lpfc_hba *
+lpfc_hba_alloc(struct pci_dev *pdev)
+{
+       struct lpfc_hba *phba;
+
+       /* Allocate memory for HBA structure */
+       phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
+       if (!phba) {
+               /* phba is NULL here, so log against the PCI device instead */
+               dev_err(&pdev->dev, "1417 Failed to allocate hba struct.\n");
+               return NULL;
+       }
+
+       /* Set reference to PCI device in HBA structure */
+       phba->pcidev = pdev;
+
+       /* Assign an unused board number */
+       phba->brd_no = lpfc_get_instance();
+       if (phba->brd_no < 0) {
+               kfree(phba);
+               return NULL;
+       }
+
+       return phba;
+}
+
+/**
+ * lpfc_hba_free - Free driver hba data structure with a device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the driver hba data structure with an
+ * HBA device.
+ **/
+static void
+lpfc_hba_free(struct lpfc_hba *phba)
+{
+       /* Release the driver assigned board number */
+       idr_remove(&lpfc_hba_index, phba->brd_no);
+
+       kfree(phba);
+       return;
+}
+
+/**
+ * lpfc_create_shost - Create hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create HBA physical port and associate a SCSI
+ * host with it.
+ *
+ * Return codes
+ *      0 - successful
+ *      other values - error
+ **/
+static int
+lpfc_create_shost(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport;
+       struct Scsi_Host  *shost;
+
+       /* Initialize HBA FC structure */
+       phba->fc_edtov = FF_DEF_EDTOV;
+       phba->fc_ratov = FF_DEF_RATOV;
+       phba->fc_altov = FF_DEF_ALTOV;
+       phba->fc_arbtov = FF_DEF_ARBTOV;
+
+       vport = lpfc_create_port(phba, phba->brd_no, &phba->pcidev->dev);
+       if (!vport)
+               return -ENODEV;
+
+       shost = lpfc_shost_from_vport(vport);
+       phba->pport = vport;
+       lpfc_debugfs_initialize(vport);
+       /* Put reference to SCSI host to driver's device private data */
+       pci_set_drvdata(phba->pcidev, shost);
+
+       return 0;
+}
+
+/**
+ * lpfc_destroy_shost - Destroy hba physical port with associated scsi host.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to destroy HBA physical port and the associated
+ * SCSI host.
+ **/
+static void
+lpfc_destroy_shost(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport = phba->pport;
+
+       /* Destroy the physical port associated with the SCSI host */
+       destroy_port(vport);
+
+       return;
+}
+
+/**
+ * lpfc_setup_bg - Setup Block guard structures and debug areas.
+ * @phba: pointer to lpfc hba data structure.
+ * @shost: the shost to be used to detect Block guard settings.
+ *
+ * This routine sets up the local Block guard protocol settings for @shost.
+ * This routine also allocates memory for debugging bg buffers.
+ **/
+static void
+lpfc_setup_bg(struct lpfc_hba *phba, struct Scsi_Host *shost)
+{
+       int pagecnt = 10;
+       if (lpfc_prot_mask && lpfc_prot_guard) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "1478 Registering BlockGuard with the "
+                               "SCSI layer\n");
+               scsi_host_set_prot(shost, lpfc_prot_mask);
+               scsi_host_set_guard(shost, lpfc_prot_guard);
+       }
+       if (!_dump_buf_data) {
+               while (pagecnt) {
+                       spin_lock_init(&_dump_buf_lock);
+                       _dump_buf_data =
+                               (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+                       if (_dump_buf_data) {
+                               printk(KERN_ERR "BLKGRD allocated %d pages for "
+                                      "_dump_buf_data at 0x%p\n",
+                                      (1 << pagecnt), _dump_buf_data);
+                               _dump_buf_data_order = pagecnt;
+                               memset(_dump_buf_data, 0,
+                                      ((1 << PAGE_SHIFT) << pagecnt));
+                               break;
+                       } else
+                               --pagecnt;
+               }
+               if (!_dump_buf_data_order)
+                       printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+                              "memory for hexdump\n");
+       } else
+               printk(KERN_ERR "BLKGRD already allocated _dump_buf_data=0x%p"
+                      "\n", _dump_buf_data);
+       if (!_dump_buf_dif) {
+               while (pagecnt) {
+                       _dump_buf_dif =
+                               (char *) __get_free_pages(GFP_KERNEL, pagecnt);
+                       if (_dump_buf_dif) {
+                               printk(KERN_ERR "BLKGRD allocated %d pages for "
+                                      "_dump_buf_dif at 0x%p\n",
+                                      (1 << pagecnt), _dump_buf_dif);
+                               _dump_buf_dif_order = pagecnt;
+                               memset(_dump_buf_dif, 0,
+                                      ((1 << PAGE_SHIFT) << pagecnt));
+                               break;
+                       } else
+                               --pagecnt;
+               }
+               if (!_dump_buf_dif_order)
+                       printk(KERN_ERR "BLKGRD ERROR unable to allocate "
+                              "memory for hexdump\n");
+       } else
+               printk(KERN_ERR "BLKGRD already allocated _dump_buf_dif=0x%p\n",
+                      _dump_buf_dif);
+}
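The allocation loop above is a classic fallback: ask for 2^order pages for the debug buffer and decrement the order (halving the request) on failure until something fits or the order reaches zero. A userspace sketch with malloc() standing in for __get_free_pages(); the 4096-byte page size is assumed for illustration:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

int main(void)
{
	int order = 10;		/* start by asking for 2^10 pages */
	char *buf = NULL;

	while (order) {
		buf = malloc((size_t)PAGE_SIZE << order);
		if (buf) {
			memset(buf, 0, (size_t)PAGE_SIZE << order);
			printf("allocated %d pages (order %d)\n",
			       1 << order, order);
			break;
		}
		--order;	/* halve the request and retry */
	}
	if (!buf)
		printf("unable to allocate debug buffer\n");
	free(buf);
	return 0;
}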
+
+/**
+ * lpfc_post_init_setup - Perform necessary device post initialization setup.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to perform all the necessary post initialization
+ * setup for the device.
+ **/
+static void
+lpfc_post_init_setup(struct lpfc_hba *phba)
+{
+       struct Scsi_Host  *shost;
+       struct lpfc_adapter_event_header adapter_event;
+
+       /* Get the default values for Model Name and Description */
+       lpfc_get_hba_model_desc(phba, phba->ModelName, phba->ModelDesc);
+
+       /*
+        * hba setup may have changed the hba_queue_depth so we need to
+        * adjust the value of can_queue.
+        */
+       shost = pci_get_drvdata(phba->pcidev);
+       shost->can_queue = phba->cfg_hba_queue_depth - 10;
+       if (phba->sli3_options & LPFC_SLI3_BG_ENABLED)
+               lpfc_setup_bg(phba, shost);
+
+       lpfc_host_attrib_init(shost);
+
+       if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
+               spin_lock_irq(shost->host_lock);
+               lpfc_poll_start_timer(phba);
+               spin_unlock_irq(shost->host_lock);
+       }
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "0428 Perform SCSI scan\n");
+       /* Send board arrival event to upper layer */
+       adapter_event.event_type = FC_REG_ADAPTER_EVENT;
+       adapter_event.subcategory = LPFC_EVENT_ARRIVAL;
+       fc_host_post_vendor_event(shost, fc_get_event_number(),
+                                 sizeof(adapter_event),
+                                 (char *) &adapter_event,
+                                 LPFC_NL_VENDOR_ID);
+       return;
+}
+
+/**
+ * lpfc_sli_pci_mem_setup - Setup SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for a device
+ * with the SLI-3 interface spec.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_sli_pci_mem_setup(struct lpfc_hba *phba)
+{
+       struct pci_dev *pdev;
+       unsigned long bar0map_len, bar2map_len;
+       int i, hbq_count;
+       void *ptr;
+       int error = -ENODEV;
+
+       /* Obtain PCI device reference */
+       if (!phba->pcidev)
+               return error;
+       else
+               pdev = phba->pcidev;
+
+       /* Set the device DMA mask size */
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+                       return error;
+
+       /* Get the bus address of Bar0 and Bar2 and the number of bytes
+        * required by each mapping.
+        */
+       phba->pci_bar0_map = pci_resource_start(pdev, 0);
+       bar0map_len = pci_resource_len(pdev, 0);
+
+       phba->pci_bar2_map = pci_resource_start(pdev, 2);
+       bar2map_len = pci_resource_len(pdev, 2);
+
+       /* Map HBA SLIM to a kernel virtual address. */
+       phba->slim_memmap_p = ioremap(phba->pci_bar0_map, bar0map_len);
+       if (!phba->slim_memmap_p) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "ioremap failed for SLIM memory.\n");
+               goto out;
+       }
+
+       /* Map HBA Control Registers to a kernel virtual address. */
+       phba->ctrl_regs_memmap_p = ioremap(phba->pci_bar2_map, bar2map_len);
+       if (!phba->ctrl_regs_memmap_p) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "ioremap failed for HBA control registers.\n");
+               goto out_iounmap_slim;
+       }
+
+       /* Allocate memory for SLI-2 structures */
+       phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
+                                              SLI2_SLIM_SIZE,
+                                              &phba->slim2p.phys,
+                                              GFP_KERNEL);
+       if (!phba->slim2p.virt)
+               goto out_iounmap;
+
+       memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
+       phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
+       phba->pcb = (phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, pcb));
+       phba->IOCBs = (phba->slim2p.virt +
+                      offsetof(struct lpfc_sli2_slim, IOCBs));
+
+       phba->hbqslimp.virt = dma_alloc_coherent(&pdev->dev,
+                                                lpfc_sli_hbq_size(),
+                                                &phba->hbqslimp.phys,
+                                                GFP_KERNEL);
+       if (!phba->hbqslimp.virt)
+               goto out_free_slim;
+
+       hbq_count = lpfc_sli_hbq_count();
+       ptr = phba->hbqslimp.virt;
+       for (i = 0; i < hbq_count; ++i) {
+               phba->hbqs[i].hbq_virt = ptr;
+               INIT_LIST_HEAD(&phba->hbqs[i].hbq_buffer_list);
+               ptr += (lpfc_hbq_defs[i]->entry_count *
+                       sizeof(struct lpfc_hbq_entry));
+       }
+       phba->hbqs[LPFC_ELS_HBQ].hbq_alloc_buffer = lpfc_els_hbq_alloc;
+       phba->hbqs[LPFC_ELS_HBQ].hbq_free_buffer = lpfc_els_hbq_free;
+
+       memset(phba->hbqslimp.virt, 0, lpfc_sli_hbq_size());
+
+       INIT_LIST_HEAD(&phba->rb_pend_list);
+
+       phba->MBslimaddr = phba->slim_memmap_p;
+       phba->HAregaddr = phba->ctrl_regs_memmap_p + HA_REG_OFFSET;
+       phba->CAregaddr = phba->ctrl_regs_memmap_p + CA_REG_OFFSET;
+       phba->HSregaddr = phba->ctrl_regs_memmap_p + HS_REG_OFFSET;
+       phba->HCregaddr = phba->ctrl_regs_memmap_p + HC_REG_OFFSET;
+
+       return 0;
+
+out_free_slim:
+       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+                         phba->slim2p.virt, phba->slim2p.phys);
+out_iounmap:
+       iounmap(phba->ctrl_regs_memmap_p);
+out_iounmap_slim:
+       iounmap(phba->slim_memmap_p);
+out:
+       return error;
+}
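The error labels above unwind in strict reverse order of acquisition, so each label releases exactly what was set up before the failure point and nothing more. A self-contained sketch of the same goto-unwind idiom:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b, *c;

	a = malloc(16);
	if (!a)
		goto out;
	b = malloc(16);
	if (!b)
		goto out_free_a;
	c = malloc(16);
	if (!c)
		goto out_free_b;

	printf("all resources acquired\n");
	/* freed here only so this demo does not leak */
	free(c);
	free(b);
	free(a);
	return 0;

out_free_b:			/* unwind mirrors setup, in reverse */
	free(b);
out_free_a:
	free(a);
out:
	return -1;
}

int main(void)
{
	return setup() ? 1 : 0;
}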
+
+/**
+ * lpfc_sli_pci_mem_unset - Unset SLI3 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for a device
+ * with the SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_pci_mem_unset(struct lpfc_hba *phba)
+{
+       struct pci_dev *pdev;
+
+       /* Obtain PCI device reference */
+       if (!phba->pcidev)
+               return;
+       else
+               pdev = phba->pcidev;
+
+       /* Free coherent DMA memory allocated */
+       dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+                         phba->hbqslimp.virt, phba->hbqslimp.phys);
+       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+                         phba->slim2p.virt, phba->slim2p.phys);
+
+       /* I/O memory unmap */
+       iounmap(phba->ctrl_regs_memmap_p);
+       iounmap(phba->slim_memmap_p);
+
+       return;
+}
+
+/**
+ * lpfc_sli4_post_status_check - Wait for SLI4 POST done and check status
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to wait for the SLI4 device Power On Self Test
+ * (POST) to complete and to check its status.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+int
+lpfc_sli4_post_status_check(struct lpfc_hba *phba)
+{
+       struct lpfc_register sta_reg, uerrlo_reg, uerrhi_reg, scratchpad;
+       uint32_t onlnreg0, onlnreg1;
+       int i, port_error = -ENODEV;
+
+       if (!phba->sli4_hba.STAregaddr)
+               return -ENODEV;
+
+       /* With unrecoverable error, log the error message and return error */
+       onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+       onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+       if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+               uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+               uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+               if (uerrlo_reg.word0 || uerrhi_reg.word0) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "1422 HBA Unrecoverable error: "
+                                       "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+                                       "online0_reg=0x%x, online1_reg=0x%x\n",
+                                       uerrlo_reg.word0, uerrhi_reg.word0,
+                                       onlnreg0, onlnreg1);
+               }
+               return -ENODEV;
+       }
+
+       /* Wait up to 30 seconds for the SLI Port POST done and ready */
+       for (i = 0; i < 3000; i++) {
+               sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
+               /* Encounter fatal POST error, break out */
+               if (bf_get(lpfc_hst_state_perr, &sta_reg)) {
+                       port_error = -ENODEV;
+                       break;
+               }
+               if (LPFC_POST_STAGE_ARMFW_READY ==
+                   bf_get(lpfc_hst_state_port_status, &sta_reg)) {
+                       port_error = 0;
+                       break;
+               }
+               msleep(10);
+       }
+
+       if (port_error)
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "1408 Failure HBA POST Status: sta_reg=0x%x, "
+                       "perr=x%x, sfi=x%x, nip=x%x, ipc=x%x, xrom=x%x, "
+                       "dl=x%x, pstatus=x%x\n", sta_reg.word0,
+                       bf_get(lpfc_hst_state_perr, &sta_reg),
+                       bf_get(lpfc_hst_state_sfi, &sta_reg),
+                       bf_get(lpfc_hst_state_nip, &sta_reg),
+                       bf_get(lpfc_hst_state_ipc, &sta_reg),
+                       bf_get(lpfc_hst_state_xrom, &sta_reg),
+                       bf_get(lpfc_hst_state_dl, &sta_reg),
+                       bf_get(lpfc_hst_state_port_status, &sta_reg));
+
+       /* Log device information */
+       scratchpad.word0 =  readl(phba->sli4_hba.SCRATCHPADregaddr);
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2534 Device Info: ChipType=0x%x, SliRev=0x%x, "
+                       "FeatureL1=0x%x, FeatureL2=0x%x\n",
+                       bf_get(lpfc_scratchpad_chiptype, &scratchpad),
+                       bf_get(lpfc_scratchpad_slirev, &scratchpad),
+                       bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
+                       bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
+
+       return port_error;
+}
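The POST wait is a bounded poll: 3000 iterations with a 10 ms sleep gives the 30-second budget named in the comment. A standalone sketch of the same loop structure, with the hardware status read stubbed out:

#include <stdio.h>
#include <time.h>

/* Stub: report "ready" after a few polls instead of reading a
 * device register. */
static int port_ready(int iteration)
{
	return iteration >= 5;
}

int main(void)
{
	struct timespec delay = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
	int i, ready = 0;

	for (i = 0; i < 3000; i++) {	/* 3000 x 10 ms = 30 s budget */
		if (port_ready(i)) {
			ready = 1;
			break;
		}
		nanosleep(&delay, NULL);
	}
	if (ready)
		printf("port ready after %d polls\n", i);
	else
		printf("POST timed out after %d polls\n", i);
	return ready ? 0 : 1;
}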
+
+/**
+ * lpfc_sli4_bar0_register_memmap - Set up SLI4 BAR0 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR0 PCI config space register
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar0_register_memmap(struct lpfc_hba *phba)
+{
+       phba->sli4_hba.UERRLOregaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_UERR_STATUS_LO;
+       phba->sli4_hba.UERRHIregaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_UERR_STATUS_HI;
+       phba->sli4_hba.ONLINE0regaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_ONLINE0;
+       phba->sli4_hba.ONLINE1regaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_ONLINE1;
+       phba->sli4_hba.SCRATCHPADregaddr = phba->sli4_hba.conf_regs_memmap_p +
+                                       LPFC_SCRATCHPAD;
+}
+
+/**
+ * lpfc_sli4_bar1_register_memmap - Set up SLI4 BAR1 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up SLI4 BAR1 control status register (CSR)
+ * memory map.
+ **/
+static void
+lpfc_sli4_bar1_register_memmap(struct lpfc_hba *phba)
+{
+
+       phba->sli4_hba.STAregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                                   LPFC_HST_STATE;
+       phba->sli4_hba.ISRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                                   LPFC_HST_ISR0;
+       phba->sli4_hba.IMRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                                   LPFC_HST_IMR0;
+       phba->sli4_hba.ISCRregaddr = phba->sli4_hba.ctrl_regs_memmap_p +
+                                    LPFC_HST_ISCR0;
+       return;
+}
+
+/**
+ * lpfc_sli4_bar2_register_memmap - Set up SLI4 BAR2 register memory map.
+ * @phba: pointer to lpfc hba data structure.
+ * @vf: virtual function number
+ *
+ * This routine is invoked to set up SLI4 BAR2 doorbell register memory map
+ * based on the given virtual function number, @vf.
+ *
+ * Return 0 if successful, otherwise -ENODEV.
+ **/
+static int
+lpfc_sli4_bar2_register_memmap(struct lpfc_hba *phba, uint32_t vf)
+{
+       if (vf > LPFC_VIR_FUNC_MAX)
+               return -ENODEV;
+
+       phba->sli4_hba.RQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+                               vf * LPFC_VFR_PAGE_SIZE + LPFC_RQ_DOORBELL);
+       phba->sli4_hba.WQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+                               vf * LPFC_VFR_PAGE_SIZE + LPFC_WQ_DOORBELL);
+       phba->sli4_hba.EQCQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+                               vf * LPFC_VFR_PAGE_SIZE + LPFC_EQCQ_DOORBELL);
+       phba->sli4_hba.MQDBregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+                               vf * LPFC_VFR_PAGE_SIZE + LPFC_MQ_DOORBELL);
+       phba->sli4_hba.BMBXregaddr = (phba->sli4_hba.drbl_regs_memmap_p +
+                               vf * LPFC_VFR_PAGE_SIZE + LPFC_BMBX);
+       return 0;
+}
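Each virtual function owns one doorbell page in BAR2, so a given register's address is simply base + vf * page_size + register_offset. A worked example of that arithmetic; the page size and offsets below are illustrative stand-ins, not the lpfc constants:

#include <stdio.h>
#include <stdint.h>

#define VFR_PAGE_SIZE 4096u	/* assumed per-VF doorbell page size */
#define RQ_DOORBELL   0x00A0u	/* illustrative register offsets */
#define WQ_DOORBELL   0x0040u

int main(void)
{
	uint64_t bar2_base = 0xF0000000u;
	uint32_t vf;

	for (vf = 0; vf < 2; vf++)
		printf("vf %u: RQ db @ 0x%llx, WQ db @ 0x%llx\n", vf,
		       (unsigned long long)(bar2_base + vf * VFR_PAGE_SIZE +
					    RQ_DOORBELL),
		       (unsigned long long)(bar2_base + vf * VFR_PAGE_SIZE +
					    WQ_DOORBELL));
	return 0;
}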
+
+/**
+ * lpfc_create_bootstrap_mbox - Create the bootstrap mailbox
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to create the bootstrap mailbox
+ * region consistent with the SLI-4 interface spec.  This
+ * routine allocates all memory necessary to communicate
+ * mailbox commands to the port and sets up all alignment
+ * needs.  No locks are expected to be held when calling
+ * this routine.
+ *
+ * Return codes
+ *     0 - successful
+ *     ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_create_bootstrap_mbox(struct lpfc_hba *phba)
+{
+       uint32_t bmbx_size;
+       struct lpfc_dmabuf *dmabuf;
+       struct dma_address *dma_address;
+       uint32_t pa_addr;
+       uint64_t phys_addr;
+
+       dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!dmabuf)
+               return -ENOMEM;
+
+       /*
+        * The bootstrap mailbox region is composed of two parts
+        * plus an alignment restriction of 16 bytes.
+        */
+       bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
+       dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                         bmbx_size,
+                                         &dmabuf->phys,
+                                         GFP_KERNEL);
+       if (!dmabuf->virt) {
+               kfree(dmabuf);
+               return -ENOMEM;
+       }
+       memset(dmabuf->virt, 0, bmbx_size);
+
+       /*
+        * Initialize the bootstrap mailbox pointers now so that the register
+        * operations are simple later.  The mailbox dma address is required
+        * to be 16-byte aligned.  Also align the virtual memory as each
+        * mailbox is copied into the bmbx mailbox region before issuing the
+        * command to the port.
+        */
+       phba->sli4_hba.bmbx.dmabuf = dmabuf;
+       phba->sli4_hba.bmbx.bmbx_size = bmbx_size;
+
+       phba->sli4_hba.bmbx.avirt = PTR_ALIGN(dmabuf->virt,
+                                             LPFC_ALIGN_16_BYTE);
+       phba->sli4_hba.bmbx.aphys = ALIGN(dmabuf->phys,
+                                             LPFC_ALIGN_16_BYTE);
+
+       /*
+        * Set the high and low physical addresses now.  The SLI4 alignment
+        * requirement is 16 bytes and the mailbox is posted to the port
+        * as two 30-bit addresses.  The other data is a bit marking whether
+        * the 30-bit address is the high or low address.
+        * Upcast bmbx aphys to 64 bits so the shift instruction compiles
+        * cleanly on 32-bit machines.
+        */
+       dma_address = &phba->sli4_hba.bmbx.dma_address;
+       phys_addr = (uint64_t)phba->sli4_hba.bmbx.aphys;
+       pa_addr = (uint32_t) ((phys_addr >> 34) & 0x3fffffff);
+       dma_address->addr_hi = (uint32_t) ((pa_addr << 2) |
+                                          LPFC_BMBX_BIT1_ADDR_HI);
+
+       pa_addr = (uint32_t) ((phba->sli4_hba.bmbx.aphys >> 4) & 0x3fffffff);
+       dma_address->addr_lo = (uint32_t) ((pa_addr << 2) |
+                                          LPFC_BMBX_BIT1_ADDR_LO);
+       return 0;
+}
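To make the address split concrete: a 16-byte-aligned 64-bit bus address is posted as two 30-bit chunks, each shifted left by 2, with the low bit used as the high/low marker (mirroring the BIT1 constants above; the flag values here are assumptions for illustration). This standalone check performs the split and reassembles the original address:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t aphys = 0x0012345678ABCDE0ULL;	/* 16-byte aligned */
	/* bits [63:34] -> high chunk, bits [33:4] -> low chunk */
	uint32_t addr_hi = (uint32_t)(((aphys >> 34) & 0x3fffffff) << 2) | 1;
	uint32_t addr_lo = (uint32_t)(((aphys >> 4) & 0x3fffffff) << 2) | 0;

	/* invert: drop the flag bits, put each chunk back in place */
	uint64_t rebuilt = ((uint64_t)(addr_hi >> 2) << 34) |
			   ((uint64_t)(addr_lo >> 2) << 4);

	printf("orig=0x%llx rebuilt=0x%llx %s\n",
	       (unsigned long long)aphys, (unsigned long long)rebuilt,
	       aphys == rebuilt ? "OK" : "MISMATCH");
	return 0;
}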
+
+/**
+ * lpfc_destroy_bootstrap_mbox - Destroy all bootstrap mailbox resources
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to teardown the bootstrap mailbox
+ * region and release all host resources. This routine requires
+ * the caller to ensure that all mailbox commands have completed, that no
+ * additional mailbox commands are sent, and that interrupts are disabled
+ * before calling this routine.
+ *
+ **/
+static void
+lpfc_destroy_bootstrap_mbox(struct lpfc_hba *phba)
+{
+       dma_free_coherent(&phba->pcidev->dev,
+                         phba->sli4_hba.bmbx.bmbx_size,
+                         phba->sli4_hba.bmbx.dmabuf->virt,
+                         phba->sli4_hba.bmbx.dmabuf->phys);
+
+       kfree(phba->sli4_hba.bmbx.dmabuf);
+       memset(&phba->sli4_hba.bmbx, 0, sizeof(struct lpfc_bmbx));
+}
+
+/**
+ * lpfc_sli4_read_config - Get the config parameters.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to read the configuration parameters from the HBA.
+ * The configuration parameters are used to set the base and maximum values
+ * for RPIs, XRIs, VPIs, VFIs, and FCFIs. These values also affect the resource
+ * allocation for the port.
+ *
+ * Return codes
+ *     0 - successful
+ *     ENOMEM - No available memory
+ *     EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_sli4_read_config(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *pmb;
+       struct lpfc_mbx_read_config *rd_config;
+       uint32_t rc = 0;
+
+       pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!pmb) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2011 Unable to allocate memory for issuing "
+                               "SLI_CONFIG_SPECIAL mailbox command\n");
+               return -ENOMEM;
+       }
+
+       lpfc_read_config(phba, pmb);
+
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "2012 Mailbox failed , mbxCmd x%x "
+                       "READ_CONFIG, mbxStatus x%x\n",
+                       bf_get(lpfc_mqe_command, &pmb->u.mqe),
+                       bf_get(lpfc_mqe_status, &pmb->u.mqe));
+               rc = -EIO;
+       } else {
+               rd_config = &pmb->u.mqe.un.rd_config;
+               phba->sli4_hba.max_cfg_param.max_xri =
+                       bf_get(lpfc_mbx_rd_conf_xri_count, rd_config);
+               phba->sli4_hba.max_cfg_param.xri_base =
+                       bf_get(lpfc_mbx_rd_conf_xri_base, rd_config);
+               phba->sli4_hba.max_cfg_param.max_vpi =
+                       bf_get(lpfc_mbx_rd_conf_vpi_count, rd_config);
+               phba->sli4_hba.max_cfg_param.vpi_base =
+                       bf_get(lpfc_mbx_rd_conf_vpi_base, rd_config);
+               phba->sli4_hba.max_cfg_param.max_rpi =
+                       bf_get(lpfc_mbx_rd_conf_rpi_count, rd_config);
+               phba->sli4_hba.max_cfg_param.rpi_base =
+                       bf_get(lpfc_mbx_rd_conf_rpi_base, rd_config);
+               phba->sli4_hba.max_cfg_param.max_vfi =
+                       bf_get(lpfc_mbx_rd_conf_vfi_count, rd_config);
+               phba->sli4_hba.max_cfg_param.vfi_base =
+                       bf_get(lpfc_mbx_rd_conf_vfi_base, rd_config);
+               phba->sli4_hba.max_cfg_param.max_fcfi =
+                       bf_get(lpfc_mbx_rd_conf_fcfi_count, rd_config);
+               phba->sli4_hba.max_cfg_param.fcfi_base =
+                       bf_get(lpfc_mbx_rd_conf_fcfi_base, rd_config);
+               phba->sli4_hba.max_cfg_param.max_eq =
+                       bf_get(lpfc_mbx_rd_conf_eq_count, rd_config);
+               phba->sli4_hba.max_cfg_param.max_rq =
+                       bf_get(lpfc_mbx_rd_conf_rq_count, rd_config);
+               phba->sli4_hba.max_cfg_param.max_wq =
+                       bf_get(lpfc_mbx_rd_conf_wq_count, rd_config);
+               phba->sli4_hba.max_cfg_param.max_cq =
+                       bf_get(lpfc_mbx_rd_conf_cq_count, rd_config);
+               phba->lmt = bf_get(lpfc_mbx_rd_conf_lmt, rd_config);
+               phba->sli4_hba.next_xri = phba->sli4_hba.max_cfg_param.xri_base;
+               phba->vpi_base = phba->sli4_hba.max_cfg_param.vpi_base;
+               phba->vfi_base = phba->sli4_hba.max_cfg_param.vfi_base;
+               phba->sli4_hba.next_rpi = phba->sli4_hba.max_cfg_param.rpi_base;
+               phba->max_vpi = phba->sli4_hba.max_cfg_param.max_vpi;
+               phba->max_vports = phba->max_vpi;
+               lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                               "2003 cfg params XRI(B:%d M:%d), "
+                               "VPI(B:%d M:%d) "
+                               "VFI(B:%d M:%d) "
+                               "RPI(B:%d M:%d) "
+                               "FCFI(B:%d M:%d)\n",
+                               phba->sli4_hba.max_cfg_param.xri_base,
+                               phba->sli4_hba.max_cfg_param.max_xri,
+                               phba->sli4_hba.max_cfg_param.vpi_base,
+                               phba->sli4_hba.max_cfg_param.max_vpi,
+                               phba->sli4_hba.max_cfg_param.vfi_base,
+                               phba->sli4_hba.max_cfg_param.max_vfi,
+                               phba->sli4_hba.max_cfg_param.rpi_base,
+                               phba->sli4_hba.max_cfg_param.max_rpi,
+                               phba->sli4_hba.max_cfg_param.fcfi_base,
+                               phba->sli4_hba.max_cfg_param.max_fcfi);
+       }
+       mempool_free(pmb, phba->mbox_mem_pool);
+
+       /* Reset the DFT_HBA_Q_DEPTH to the max xri */
+       if (phba->cfg_hba_queue_depth > (phba->sli4_hba.max_cfg_param.max_xri))
+               phba->cfg_hba_queue_depth =
+                               phba->sli4_hba.max_cfg_param.max_xri;
+       return rc;
+}
+
+/**
+ * lpfc_setup_endian_order - Notify the port of the host's endian order.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the host-side endian order to the
+ * HBA consistent with the SLI-4 interface spec.
+ *
+ * Return codes
+ *     0 - successful
+ *     ENOMEM - No available memory
+ *     EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_setup_endian_order(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mboxq;
+       uint32_t rc = 0;
+       uint32_t endian_mb_data[2] = {HOST_ENDIAN_LOW_WORD0,
+                                     HOST_ENDIAN_HIGH_WORD1};
+
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0492 Unable to allocate memory for issuing "
+                               "SLI_CONFIG_SPECIAL mailbox command\n");
+               return -ENOMEM;
+       }
+
+       /*
+        * The SLI4_CONFIG_SPECIAL mailbox command requires the first two
+        * words to contain special data values and no other data.
+        */
+       memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+       memcpy(&mboxq->u.mqe, &endian_mb_data, sizeof(endian_mb_data));
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0493 SLI_CONFIG_SPECIAL mailbox failed with "
+                               "status x%x\n",
+                               rc);
+               rc = -EIO;
+       }
+
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_queue_create - Create all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate all the SLI4 queues for the FCoE HBA
+ * operation. For each SLI4 queue type, the parameters such as queue entry
+ * count (queue depth) shall be taken from the module parameter. For now,
+ * we just use some constant number as a placeholder.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+static int
+lpfc_sli4_queue_create(struct lpfc_hba *phba)
+{
+       struct lpfc_queue *qdesc;
+       int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+       int cfg_fcp_wq_count;
+       int cfg_fcp_eq_count;
+
+       /*
+        * Sanity check for configured queue parameters against the run-time
+        * device parameters
+        */
+
+       /* Sanity check on FCP fast-path WQ parameters */
+       cfg_fcp_wq_count = phba->cfg_fcp_wq_count;
+       if (cfg_fcp_wq_count >
+           (phba->sli4_hba.max_cfg_param.max_wq - LPFC_SP_WQN_DEF)) {
+               cfg_fcp_wq_count = phba->sli4_hba.max_cfg_param.max_wq -
+                                  LPFC_SP_WQN_DEF;
+               if (cfg_fcp_wq_count < LPFC_FP_WQN_MIN) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2581 Not enough WQs (%d) from "
+                                       "the pci function for supporting "
+                                       "FCP WQs (%d)\n",
+                                       phba->sli4_hba.max_cfg_param.max_wq,
+                                       phba->cfg_fcp_wq_count);
+                       goto out_error;
+               }
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2582 Not enough WQs (%d) from the pci "
+                               "function for supporting the requested "
+                               "FCP WQs (%d), the actual FCP WQs can "
+                               "be supported: %d\n",
+                               phba->sli4_hba.max_cfg_param.max_wq,
+                               phba->cfg_fcp_wq_count, cfg_fcp_wq_count);
+       }
+       /* The actual number of FCP work queues adopted */
+       phba->cfg_fcp_wq_count = cfg_fcp_wq_count;
+
+       /* Sanity check on FCP fast-path EQ parameters */
+       cfg_fcp_eq_count = phba->cfg_fcp_eq_count;
+       if (cfg_fcp_eq_count >
+           (phba->sli4_hba.max_cfg_param.max_eq - LPFC_SP_EQN_DEF)) {
+               cfg_fcp_eq_count = phba->sli4_hba.max_cfg_param.max_eq -
+                                  LPFC_SP_EQN_DEF;
+               if (cfg_fcp_eq_count < LPFC_FP_EQN_MIN) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "2574 Not enough EQs (%d) from the "
+                                       "pci function for supporting FCP "
+                                       "EQs (%d)\n",
+                                       phba->sli4_hba.max_cfg_param.max_eq,
+                                       phba->cfg_fcp_eq_count);
+                       goto out_error;
+               }
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2575 Not enough EQs (%d) from the pci "
+                               "function for supporting the requested "
+                               "FCP EQs (%d), the actual FCP EQs can "
+                               "be supported: %d\n",
+                               phba->sli4_hba.max_cfg_param.max_eq,
+                               phba->cfg_fcp_eq_count, cfg_fcp_eq_count);
+       }
+       /* It does not make sense to have more EQs than WQs */
+       if (cfg_fcp_eq_count > phba->cfg_fcp_wq_count) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2593 The number of FCP EQs (%d) is more "
+                               "than the number of FCP WQs (%d), take "
+                               "the number of FCP EQs same as than of "
+                               "WQs (%d)\n", cfg_fcp_eq_count,
+                               phba->cfg_fcp_wq_count,
+                               phba->cfg_fcp_wq_count);
+               cfg_fcp_eq_count = phba->cfg_fcp_wq_count;
+       }
+       /* The actual number of FCP event queues adopted */
+       phba->cfg_fcp_eq_count = cfg_fcp_eq_count;
+       /* The overall number of event queues used */
+       phba->sli4_hba.cfg_eqn = phba->cfg_fcp_eq_count + LPFC_SP_EQN_DEF;
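The clamping policy above caps the requested queue count at what the PCI function exposes minus the slow-path reservation, fails outright if even the minimum cannot be met, and finally caps EQs at the WQ count. A standalone sketch of that policy under illustrative constants:

#include <stdio.h>

#define SP_RESERVED 1	/* slow-path queues reserved out of the max */
#define FP_MIN      1	/* minimum fast-path queues to operate */

static int clamp_queue_count(int requested, int hw_max)
{
	int avail = hw_max - SP_RESERVED;

	if (requested > avail) {
		if (avail < FP_MIN)
			return -1;	/* cannot meet even the minimum */
		return avail;		/* fall back to what fits */
	}
	return requested;
}

int main(void)
{
	printf("req 4, max 16 -> %d\n", clamp_queue_count(4, 16));	/* 4  */
	printf("req 8, max 4  -> %d\n", clamp_queue_count(8, 4));	/* 3  */
	printf("req 8, max 1  -> %d\n", clamp_queue_count(8, 1));	/* -1 */
	return 0;
}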
+
+       /*
+        * Create Event Queues (EQs)
+        */
+
+       /* Get EQ depth from module parameter, fake the default for now */
+       phba->sli4_hba.eq_esize = LPFC_EQE_SIZE_4B;
+       phba->sli4_hba.eq_ecount = LPFC_EQE_DEF_COUNT;
+
+       /* Create slow path event queue */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+                                     phba->sli4_hba.eq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0496 Failed allocate slow-path EQ\n");
+               goto out_error;
+       }
+       phba->sli4_hba.sp_eq = qdesc;
+
+       /* Create fast-path FCP Event Queue(s) */
+       phba->sli4_hba.fp_eq = kzalloc((sizeof(struct lpfc_queue *) *
+                              phba->cfg_fcp_eq_count), GFP_KERNEL);
+       if (!phba->sli4_hba.fp_eq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2576 Failed allocate memory for fast-path "
+                               "EQ record array\n");
+               goto out_free_sp_eq;
+       }
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+                                             phba->sli4_hba.eq_ecount);
+               if (!qdesc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0497 Failed allocate fast-path EQ\n");
+                       goto out_free_fp_eq;
+               }
+               phba->sli4_hba.fp_eq[fcp_eqidx] = qdesc;
+       }
+
+       /*
+        * Create Complete Queues (CQs)
+        */
+
+       /* Get CQ depth from module parameter, fake the default for now */
+       phba->sli4_hba.cq_esize = LPFC_CQE_SIZE;
+       phba->sli4_hba.cq_ecount = LPFC_CQE_DEF_COUNT;
+
+       /* Create slow-path Mailbox Command Complete Queue */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+                                     phba->sli4_hba.cq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0500 Failed allocate slow-path mailbox CQ\n");
+               goto out_free_fp_eq;
+       }
+       phba->sli4_hba.mbx_cq = qdesc;
+
+       /* Create slow-path ELS Complete Queue */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+                                     phba->sli4_hba.cq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0501 Failed allocate slow-path ELS CQ\n");
+               goto out_free_mbx_cq;
+       }
+       phba->sli4_hba.els_cq = qdesc;
+
+       /* Create slow-path Unsolicited Receive Complete Queue */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+                                     phba->sli4_hba.cq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0502 Failed allocate slow-path USOL RX CQ\n");
+               goto out_free_els_cq;
+       }
+       phba->sli4_hba.rxq_cq = qdesc;
+
+       /* Create fast-path FCP Completion Queue(s), one-to-one with EQs */
+       phba->sli4_hba.fcp_cq = kzalloc((sizeof(struct lpfc_queue *) *
+                               phba->cfg_fcp_eq_count), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2577 Failed allocate memory for fast-path "
+                               "CQ record array\n");
+               goto out_free_rxq_cq;
+       }
+       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.cq_esize,
+                                             phba->sli4_hba.cq_ecount);
+               if (!qdesc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0499 Failed allocate fast-path FCP "
+                                       "CQ (%d)\n", fcp_cqidx);
+                       goto out_free_fcp_cq;
+               }
+               phba->sli4_hba.fcp_cq[fcp_cqidx] = qdesc;
+       }
+
+       /* Create Mailbox Command Queue */
+       phba->sli4_hba.mq_esize = LPFC_MQE_SIZE;
+       phba->sli4_hba.mq_ecount = LPFC_MQE_DEF_COUNT;
+
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.mq_esize,
+                                     phba->sli4_hba.mq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0505 Failed allocate slow-path MQ\n");
+               goto out_free_fcp_cq;
+       }
+       phba->sli4_hba.mbx_wq = qdesc;
+
+       /*
+        * Create all the Work Queues (WQs)
+        */
+       phba->sli4_hba.wq_esize = LPFC_WQE_SIZE;
+       phba->sli4_hba.wq_ecount = LPFC_WQE_DEF_COUNT;
+
+       /* Create slow-path ELS Work Queue */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+                                     phba->sli4_hba.wq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0504 Failed allocate slow-path ELS WQ\n");
+               goto out_free_mbx_wq;
+       }
+       phba->sli4_hba.els_wq = qdesc;
+
+       /* Create fast-path FCP Work Queue(s) */
+       phba->sli4_hba.fcp_wq = kzalloc((sizeof(struct lpfc_queue *) *
+                               phba->cfg_fcp_wq_count), GFP_KERNEL);
+       if (!phba->sli4_hba.fcp_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2578 Failed allocate memory for fast-path "
+                               "WQ record array\n");
+               goto out_free_els_wq;
+       }
+       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+               qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.wq_esize,
+                                             phba->sli4_hba.wq_ecount);
+               if (!qdesc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0503 Failed allocate fast-path FCP "
+                                       "WQ (%d)\n", fcp_wqidx);
+                       goto out_free_fcp_wq;
+               }
+               phba->sli4_hba.fcp_wq[fcp_wqidx] = qdesc;
+       }
+
+       /*
+        * Create Receive Queue (RQ)
+        */
+       phba->sli4_hba.rq_esize = LPFC_RQE_SIZE;
+       phba->sli4_hba.rq_ecount = LPFC_RQE_DEF_COUNT;
+
+       /* Create Receive Queue for header */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+                                     phba->sli4_hba.rq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0506 Failed allocate receive HRQ\n");
+               goto out_free_fcp_wq;
+       }
+       phba->sli4_hba.hdr_rq = qdesc;
+
+       /* Create Receive Queue for data */
+       qdesc = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.rq_esize,
+                                     phba->sli4_hba.rq_ecount);
+       if (!qdesc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0507 Failed allocate receive DRQ\n");
+               goto out_free_hdr_rq;
+       }
+       phba->sli4_hba.dat_rq = qdesc;
+
+       return 0;
+
+out_free_hdr_rq:
+       lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+       phba->sli4_hba.hdr_rq = NULL;
+out_free_fcp_wq:
+       for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--) {
+               lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_wqidx]);
+               phba->sli4_hba.fcp_wq[fcp_wqidx] = NULL;
+       }
+       kfree(phba->sli4_hba.fcp_wq);
+out_free_els_wq:
+       lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+       phba->sli4_hba.els_wq = NULL;
+out_free_mbx_wq:
+       lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+       phba->sli4_hba.mbx_wq = NULL;
+out_free_fcp_cq:
+       for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--) {
+               lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_cqidx]);
+               phba->sli4_hba.fcp_cq[fcp_cqidx] = NULL;
+       }
+       kfree(phba->sli4_hba.fcp_cq);
+out_free_rxq_cq:
+       lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+       phba->sli4_hba.rxq_cq = NULL;
+out_free_els_cq:
+       lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+       phba->sli4_hba.els_cq = NULL;
+out_free_mbx_cq:
+       lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+       phba->sli4_hba.mbx_cq = NULL;
+out_free_fp_eq:
+       for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--) {
+               lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_eqidx]);
+               phba->sli4_hba.fp_eq[fcp_eqidx] = NULL;
+       }
+       kfree(phba->sli4_hba.fp_eq);
+out_free_sp_eq:
+       lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+       phba->sli4_hba.sp_eq = NULL;
+out_error:
+       return -ENOMEM;
+}
+
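+/*
+ * Editorial note on the error-unwind ladder above: each out_free_* label
+ * releases exactly what was allocated before the corresponding goto, so a
+ * failure at any step unwinds in reverse-allocation order. A minimal
+ * sketch of the pattern (illustrative only, not part of this patch):
+ *
+ *     a = alloc_a();
+ *     if (!a)
+ *             goto out_error;
+ *     b = alloc_b();
+ *     if (!b)
+ *             goto out_free_a;
+ *     return 0;
+ * out_free_a:
+ *     free_a(a);
+ * out_error:
+ *     return -ENOMEM;
+ */
+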
+/**
+ * lpfc_sli4_queue_destroy - Destroy all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release all the SLI4 queues set up for FCoE
+ * HBA operation.
+ **/
+static void
+lpfc_sli4_queue_destroy(struct lpfc_hba *phba)
+{
+       int fcp_qidx;
+
+       /* Release mailbox command work queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.mbx_wq);
+       phba->sli4_hba.mbx_wq = NULL;
+
+       /* Release ELS work queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.els_wq);
+       phba->sli4_hba.els_wq = NULL;
+
+       /* Release FCP work queue */
+       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+               lpfc_sli4_queue_free(phba->sli4_hba.fcp_wq[fcp_qidx]);
+       kfree(phba->sli4_hba.fcp_wq);
+       phba->sli4_hba.fcp_wq = NULL;
+
+       /* Release unsolicited receive queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.hdr_rq);
+       phba->sli4_hba.hdr_rq = NULL;
+       lpfc_sli4_queue_free(phba->sli4_hba.dat_rq);
+       phba->sli4_hba.dat_rq = NULL;
+
+       /* Release unsolicited receive complete queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.rxq_cq);
+       phba->sli4_hba.rxq_cq = NULL;
+
+       /* Release ELS complete queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.els_cq);
+       phba->sli4_hba.els_cq = NULL;
+
+       /* Release mailbox command complete queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.mbx_cq);
+       phba->sli4_hba.mbx_cq = NULL;
+
+       /* Release FCP response complete queue */
+       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+               lpfc_sli4_queue_free(phba->sli4_hba.fcp_cq[fcp_qidx]);
+       kfree(phba->sli4_hba.fcp_cq);
+       phba->sli4_hba.fcp_cq = NULL;
+
+       /* Release fast-path event queue */
+       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+               lpfc_sli4_queue_free(phba->sli4_hba.fp_eq[fcp_qidx]);
+       kfree(phba->sli4_hba.fp_eq);
+       phba->sli4_hba.fp_eq = NULL;
+
+       /* Release slow-path event queue */
+       lpfc_sli4_queue_free(phba->sli4_hba.sp_eq);
+       phba->sli4_hba.sp_eq = NULL;
+
+       return;
+}
+
+/**
+ * lpfc_sli4_queue_setup - Set up all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up all the SLI4 queues for the FCoE HBA
+ * operation.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_queue_setup(struct lpfc_hba *phba)
+{
+       int rc = -ENOMEM;
+       int fcp_eqidx, fcp_cqidx, fcp_wqidx;
+       int fcp_cq_index = 0;
+
+       /*
+        * Set up Event Queues (EQs)
+        */
+
+       /* Set up slow-path event queue */
+       if (!phba->sli4_hba.sp_eq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0520 Slow-path EQ not allocated\n");
+               goto out_error;
+       }
+       rc = lpfc_eq_create(phba, phba->sli4_hba.sp_eq,
+                           LPFC_SP_DEF_IMAX);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0521 Failed setup of slow-path EQ: "
+                               "rc = 0x%x\n", rc);
+               goto out_error;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2583 Slow-path EQ setup: queue-id=%d\n",
+                       phba->sli4_hba.sp_eq->queue_id);
+
+       /* Set up fast-path event queue */
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+               if (!phba->sli4_hba.fp_eq[fcp_eqidx]) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0522 Fast-path EQ (%d) not "
+                                       "allocated\n", fcp_eqidx);
+                       goto out_destroy_fp_eq;
+               }
+               rc = lpfc_eq_create(phba, phba->sli4_hba.fp_eq[fcp_eqidx],
+                                   phba->cfg_fcp_imax);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0523 Failed setup of fast-path EQ "
+                                       "(%d), rc = 0x%x\n", fcp_eqidx, rc);
+                       goto out_destroy_fp_eq;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2584 Fast-path EQ setup: "
+                               "queue[%d]-id=%d\n", fcp_eqidx,
+                               phba->sli4_hba.fp_eq[fcp_eqidx]->queue_id);
+       }
+
+       /*
+        * Set up Complete Queues (CQs)
+        */
+
+       /* Set up slow-path MBOX Complete Queue as the first CQ */
+       if (!phba->sli4_hba.mbx_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0528 Mailbox CQ not allocated\n");
+               goto out_destroy_fp_eq;
+       }
+       rc = lpfc_cq_create(phba, phba->sli4_hba.mbx_cq, phba->sli4_hba.sp_eq,
+                           LPFC_MCQ, LPFC_MBOX);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0529 Failed setup of slow-path mailbox CQ: "
+                               "rc = 0x%x\n", rc);
+               goto out_destroy_fp_eq;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2585 MBX CQ setup: cq-id=%d, parent eq-id=%d\n",
+                       phba->sli4_hba.mbx_cq->queue_id,
+                       phba->sli4_hba.sp_eq->queue_id);
+
+       /* Set up slow-path ELS Complete Queue */
+       if (!phba->sli4_hba.els_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0530 ELS CQ not allocated\n");
+               goto out_destroy_mbx_cq;
+       }
+       rc = lpfc_cq_create(phba, phba->sli4_hba.els_cq, phba->sli4_hba.sp_eq,
+                           LPFC_WCQ, LPFC_ELS);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0531 Failed setup of slow-path ELS CQ: "
+                               "rc = 0x%x\n", rc);
+               goto out_destroy_mbx_cq;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2586 ELS CQ setup: cq-id=%d, parent eq-id=%d\n",
+                       phba->sli4_hba.els_cq->queue_id,
+                       phba->sli4_hba.sp_eq->queue_id);
+
+       /* Set up slow-path Unsolicited Receive Complete Queue */
+       if (!phba->sli4_hba.rxq_cq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0532 USOL RX CQ not allocated\n");
+               goto out_destroy_els_cq;
+       }
+       rc = lpfc_cq_create(phba, phba->sli4_hba.rxq_cq, phba->sli4_hba.sp_eq,
+                           LPFC_RCQ, LPFC_USOL);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0533 Failed setup of slow-path USOL RX CQ: "
+                               "rc = 0x%x\n", rc);
+               goto out_destroy_els_cq;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2587 USL CQ setup: cq-id=%d, parent eq-id=%d\n",
+                       phba->sli4_hba.rxq_cq->queue_id,
+                       phba->sli4_hba.sp_eq->queue_id);
+
+       /* Set up fast-path FCP Response Complete Queue */
+       for (fcp_cqidx = 0; fcp_cqidx < phba->cfg_fcp_eq_count; fcp_cqidx++) {
+               if (!phba->sli4_hba.fcp_cq[fcp_cqidx]) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0526 Fast-path FCP CQ (%d) not "
+                                       "allocated\n", fcp_cqidx);
+                       goto out_destroy_fcp_cq;
+               }
+               rc = lpfc_cq_create(phba, phba->sli4_hba.fcp_cq[fcp_cqidx],
+                                   phba->sli4_hba.fp_eq[fcp_cqidx],
+                                   LPFC_WCQ, LPFC_FCP);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0527 Failed setup of fast-path FCP "
+                                       "CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+                       goto out_destroy_fcp_cq;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2588 FCP CQ setup: cq[%d]-id=%d, "
+                               "parent eq[%d]-id=%d\n",
+                               fcp_cqidx,
+                               phba->sli4_hba.fcp_cq[fcp_cqidx]->queue_id,
+                               fcp_cqidx,
+                               phba->sli4_hba.fp_eq[fcp_cqidx]->queue_id);
+       }
+
+       /*
+        * Set up all the Work Queues (WQs)
+        */
+
+       /* Set up Mailbox Command Queue */
+       if (!phba->sli4_hba.mbx_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0538 Slow-path MQ not allocated\n");
+               goto out_destroy_fcp_cq;
+       }
+       rc = lpfc_mq_create(phba, phba->sli4_hba.mbx_wq,
+                           phba->sli4_hba.mbx_cq, LPFC_MBOX);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0539 Failed setup of slow-path MQ: "
+                               "rc = 0x%x\n", rc);
+               goto out_destroy_fcp_cq;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2589 MBX MQ setup: wq-id=%d, parent cq-id=%d\n",
+                       phba->sli4_hba.mbx_wq->queue_id,
+                       phba->sli4_hba.mbx_cq->queue_id);
+
+       /* Set up slow-path ELS Work Queue */
+       if (!phba->sli4_hba.els_wq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0536 Slow-path ELS WQ not allocated\n");
+               goto out_destroy_mbx_wq;
+       }
+       rc = lpfc_wq_create(phba, phba->sli4_hba.els_wq,
+                           phba->sli4_hba.els_cq, LPFC_ELS);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0537 Failed setup of slow-path ELS WQ: "
+                               "rc = 0x%x\n", rc);
+               goto out_destroy_mbx_wq;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2590 ELS WQ setup: wq-id=%d, parent cq-id=%d\n",
+                       phba->sli4_hba.els_wq->queue_id,
+                       phba->sli4_hba.els_cq->queue_id);
+
+       /* Set up fast-path FCP Work Queue */
+       for (fcp_wqidx = 0; fcp_wqidx < phba->cfg_fcp_wq_count; fcp_wqidx++) {
+               if (!phba->sli4_hba.fcp_wq[fcp_wqidx]) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0534 Fast-path FCP WQ (%d) not "
+                                       "allocated\n", fcp_wqidx);
+                       goto out_destroy_fcp_wq;
+               }
+               rc = lpfc_wq_create(phba, phba->sli4_hba.fcp_wq[fcp_wqidx],
+                                   phba->sli4_hba.fcp_cq[fcp_cq_index],
+                                   LPFC_FCP);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0535 Failed setup of fast-path FCP "
+                                       "WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+                       goto out_destroy_fcp_wq;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "2591 FCP WQ setup: wq[%d]-id=%d, "
+                               "parent cq[%d]-id=%d\n",
+                               fcp_wqidx,
+                               phba->sli4_hba.fcp_wq[fcp_wqidx]->queue_id,
+                               fcp_cq_index,
+                               phba->sli4_hba.fcp_cq[fcp_cq_index]->queue_id);
+               /* Round robin FCP Work Queue's Completion Queue assignment */
+               fcp_cq_index = ((fcp_cq_index + 1) % phba->cfg_fcp_eq_count);
+       }
+
+       /*
+        * Create Receive Queue (RQ)
+        */
+       if (!phba->sli4_hba.hdr_rq || !phba->sli4_hba.dat_rq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0540 Receive Queue not allocated\n");
+               goto out_destroy_fcp_wq;
+       }
+       rc = lpfc_rq_create(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+                           phba->sli4_hba.rxq_cq, LPFC_USOL);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0541 Failed setup of Receive Queue: "
+                               "rc = 0x%x\n", rc);
+               goto out_destroy_fcp_wq;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "2592 USL RQ setup: hdr-rq-id=%d, dat-rq-id=%d "
+                       "parent cq-id=%d\n",
+                       phba->sli4_hba.hdr_rq->queue_id,
+                       phba->sli4_hba.dat_rq->queue_id,
+                       phba->sli4_hba.rxq_cq->queue_id);
+       return 0;
+
+out_destroy_fcp_wq:
+       for (--fcp_wqidx; fcp_wqidx >= 0; fcp_wqidx--)
+               lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_wqidx]);
+       lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+out_destroy_mbx_wq:
+       lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+out_destroy_fcp_cq:
+       for (--fcp_cqidx; fcp_cqidx >= 0; fcp_cqidx--)
+               lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_cqidx]);
+       lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+out_destroy_els_cq:
+       lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+out_destroy_mbx_cq:
+       lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+out_destroy_fp_eq:
+       for (--fcp_eqidx; fcp_eqidx >= 0; fcp_eqidx--)
+               lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_eqidx]);
+       lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+out_error:
+       return rc;
+}
+
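+/*
+ * Worked example of the round-robin WQ-to-CQ assignment above (editorial
+ * sketch; the concrete counts are assumptions, not taken from this patch):
+ * with cfg_fcp_wq_count = 4 and cfg_fcp_eq_count = 2, fcp_cq_index cycles
+ * 0, 1, 0, 1, so fcp_wq[0] and fcp_wq[2] complete on fcp_cq[0] while
+ * fcp_wq[1] and fcp_wq[3] complete on fcp_cq[1]:
+ *
+ *     fcp_cq_index = (fcp_cq_index + 1) % phba->cfg_fcp_eq_count;
+ */
+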
+/**
+ * lpfc_sli4_queue_unset - Unset all the SLI4 queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset all the SLI4 queues set up for FCoE
+ * HBA operation.
+ **/
+void
+lpfc_sli4_queue_unset(struct lpfc_hba *phba)
+{
+       int fcp_qidx;
+
+       /* Unset mailbox command work queue */
+       lpfc_mq_destroy(phba, phba->sli4_hba.mbx_wq);
+       /* Unset ELS work queue */
+       lpfc_wq_destroy(phba, phba->sli4_hba.els_wq);
+       /* Unset unsolicited receive queue */
+       lpfc_rq_destroy(phba, phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq);
+       /* Unset FCP work queue */
+       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_wq_count; fcp_qidx++)
+               lpfc_wq_destroy(phba, phba->sli4_hba.fcp_wq[fcp_qidx]);
+       /* Unset mailbox command complete queue */
+       lpfc_cq_destroy(phba, phba->sli4_hba.mbx_cq);
+       /* Unset ELS complete queue */
+       lpfc_cq_destroy(phba, phba->sli4_hba.els_cq);
+       /* Unset unsolicited receive complete queue */
+       lpfc_cq_destroy(phba, phba->sli4_hba.rxq_cq);
+       /* Unset FCP response complete queue */
+       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+               lpfc_cq_destroy(phba, phba->sli4_hba.fcp_cq[fcp_qidx]);
+       /* Unset fast-path event queue */
+       for (fcp_qidx = 0; fcp_qidx < phba->cfg_fcp_eq_count; fcp_qidx++)
+               lpfc_eq_destroy(phba, phba->sli4_hba.fp_eq[fcp_qidx]);
+       /* Unset slow-path event queue */
+       lpfc_eq_destroy(phba, phba->sli4_hba.sp_eq);
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_create - Create completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate and set up a pool of completion queue
+ * events. The body of a completion queue event is a completion queue entry
+ * (CQE). For now, this pool is used by the interrupt service routine to queue
+ * the following HBA completion queue events for the worker thread to process:
+ *   - Mailbox asynchronous events
+ *   - Receive queue completion unsolicited events
+ * Later, this can be used for all the slow-path events.
+ *
+ * Return codes
+ *      0 - successful
+ *      -ENOMEM - No available memory
+ **/
+static int
+lpfc_sli4_cq_event_pool_create(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event;
+       int i;
+
+       for (i = 0; i < (4 * phba->sli4_hba.cq_ecount); i++) {
+               cq_event = kmalloc(sizeof(struct lpfc_cq_event), GFP_KERNEL);
+               if (!cq_event)
+                       goto out_pool_create_fail;
+               list_add_tail(&cq_event->list,
+                             &phba->sli4_hba.sp_cqe_event_pool);
+       }
+       return 0;
+
+out_pool_create_fail:
+       lpfc_sli4_cq_event_pool_destroy(phba);
+       return -ENOMEM;
+}
+
+/**
+ * lpfc_sli4_cq_event_pool_destroy - Free completion-queue event free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the pool of completion queue events at
+ * driver unload time. Note that it is the responsibility of the driver
+ * cleanup routine to free all the outstanding completion-queue events
+ * allocated from this pool back into the pool before invoking this routine
+ * to destroy the pool.
+ **/
+static void
+lpfc_sli4_cq_event_pool_destroy(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event, *next_cq_event;
+
+       list_for_each_entry_safe(cq_event, next_cq_event,
+                                &phba->sli4_hba.sp_cqe_event_pool, list) {
+               list_del(&cq_event->list);
+               kfree(cq_event);
+       }
+}
+
+/**
+ * __lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the lock-free version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+__lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event = NULL;
+
+       list_remove_head(&phba->sli4_hba.sp_cqe_event_pool, cq_event,
+                        struct lpfc_cq_event, list);
+       return cq_event;
+}
+
+/**
+ * lpfc_sli4_cq_event_alloc - Allocate a completion-queue event from free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is the locking version of the API invoked to allocate a
+ * completion-queue event from the free pool.
+ *
+ * Return: Pointer to the newly allocated completion-queue event if successful
+ *         NULL otherwise.
+ **/
+struct lpfc_cq_event *
+lpfc_sli4_cq_event_alloc(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event;
+       unsigned long iflags;
+
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       cq_event = __lpfc_sli4_cq_event_alloc(phba);
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       return cq_event;
+}
+
+/**
+ * __lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the lock-free version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+__lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+                            struct lpfc_cq_event *cq_event)
+{
+       list_add_tail(&cq_event->list, &phba->sli4_hba.sp_cqe_event_pool);
+}
+
+/**
+ * lpfc_sli4_cq_event_release - Release a completion-queue event to free pool
+ * @phba: pointer to lpfc hba data structure.
+ * @cq_event: pointer to the completion queue event to be freed.
+ *
+ * This routine is the locking version of the API invoked to release a
+ * completion-queue event back into the free pool.
+ **/
+void
+lpfc_sli4_cq_event_release(struct lpfc_hba *phba,
+                          struct lpfc_cq_event *cq_event)
+{
+       unsigned long iflags;
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       __lpfc_sli4_cq_event_release(phba, cq_event);
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+}
+
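+/*
+ * Illustrative use of the completion-queue event pool API above (an
+ * editorial sketch; the calling context is an assumption, not part of
+ * this patch). Contexts already holding phba->hbalock use the lock-free
+ * __ variants; all others use the locking wrappers:
+ *
+ *     cq_event = lpfc_sli4_cq_event_alloc(phba);  (takes hbalock)
+ *     if (cq_event) {
+ *             (copy the CQE and queue it for the worker thread)
+ *             lpfc_sli4_cq_event_release(phba, cq_event);
+ *     }
+ */
+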
+/**
+ * lpfc_sli4_cq_event_release_all - Release all cq events to the free pool
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine frees all the pending completion-queue events back into
+ * the free pool for device reset.
+ **/
+static void
+lpfc_sli4_cq_event_release_all(struct lpfc_hba *phba)
+{
+       LIST_HEAD(cqelist);
+       struct lpfc_cq_event *cqe;
+       unsigned long iflags;
+
+       /* Retrieve all the pending WCQEs from pending WCQE lists */
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       /* Pending FCP XRI abort events */
+       list_splice_init(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+                        &cqelist);
+       /* Pending ELS XRI abort events */
+       list_splice_init(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+                        &cqelist);
+       /* Pending async events */
+       list_splice_init(&phba->sli4_hba.sp_asynce_work_queue,
+                        &cqelist);
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       while (!list_empty(&cqelist)) {
+               list_remove_head(&cqelist, cqe, struct lpfc_cq_event, list);
+               lpfc_sli4_cq_event_release(phba, cqe);
+       }
+}
+
+/**
+ * lpfc_pci_function_reset - Reset PCI function.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to request a PCI function reset. It destroys
+ * all resources assigned to the PCI function which originates this request.
+ *
+ * Return codes
+ *      0 - successful
+ *      ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_pci_function_reset(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mboxq;
+       uint32_t rc = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0494 Unable to allocate memory for issuing "
+                               "SLI_FUNCTION_RESET mailbox command\n");
+               return -ENOMEM;
+       }
+
+       /* Set up PCI function reset SLI4_CONFIG mailbox-ioctl command */
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_FUNCTION_RESET, 0,
+                        LPFC_SLI4_MBX_EMBED);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0495 SLI_FUNCTION_RESET mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       }
+       return rc;
+}
+
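+/*
+ * Editorial note on the completion check above (restating the code, no
+ * new facts): an SLI4_CONFIG mailbox command is treated as successful
+ * only when the issue return code and the two config-header status
+ * fields are all zero:
+ *
+ *     shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+ *     shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+ *     if (shdr_status || shdr_add_status || rc)
+ *             (command failed)
+ *
+ * The NOP helper below applies the same check per command.
+ */
+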
+/**
+ * lpfc_sli4_send_nop_mbox_cmds - Send SLI-4 NOP mailbox commands
+ * @phba: pointer to lpfc hba data structure.
+ * @cnt: number of nop mailbox commands to send.
+ *
+ * This routine is invoked to send @cnt NOP mailbox commands and wait for
+ * each command to complete.
+ *
+ * Return: the number of NOP mailbox commands completed.
+ **/
+static int
+lpfc_sli4_send_nop_mbox_cmds(struct lpfc_hba *phba, uint32_t cnt)
+{
+       LPFC_MBOXQ_t *mboxq;
+       int length, cmdsent;
+       uint32_t mbox_tmo;
+       uint32_t rc = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (cnt == 0) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2518 Requested to send 0 NOP mailbox cmd\n");
+               return cnt;
+       }
+
+       mboxq = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2519 Unable to allocate memory for issuing "
+                               "NOP mailbox command\n");
+               return 0;
+       }
+
+       /* Set up NOP SLI4_CONFIG mailbox-ioctl command */
+       length = (sizeof(struct lpfc_mbx_nop) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_NOP, length, LPFC_SLI4_MBX_EMBED);
+
+       mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+       for (cmdsent = 0; cmdsent < cnt; cmdsent++) {
+               if (!phba->sli4_hba.intr_enable)
+                       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+               else
+                       rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+               if (rc == MBX_TIMEOUT)
+                       break;
+               /* Check return status */
+               shdr = (union lpfc_sli4_cfg_shdr *)
+                       &mboxq->u.mqe.un.sli4_config.header.cfg_shdr;
+               shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+               shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
+                                        &shdr->response);
+               if (shdr_status || shdr_add_status || rc) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "2520 NOP mailbox command failed "
+                                       "status x%x add_status x%x mbx "
+                                       "status x%x\n", shdr_status,
+                                       shdr_add_status, rc);
+                       break;
+               }
+       }
+
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+
+       return cmdsent;
+}
+
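+/*
+ * Caller-side sketch for the NOP helper above (editorial; the caller
+ * shown is an assumption, not part of this patch). Because the routine
+ * returns the number of commands that completed, a caller can detect a
+ * partial failure by comparing against the requested count:
+ *
+ *     if (lpfc_sli4_send_nop_mbox_cmds(phba, cnt) != cnt)
+ *             (handle mailbox failure)
+ */
+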
+/**
+ * lpfc_sli4_fcfi_unreg - Unregister fcfi from device
+ * @phba: pointer to lpfc hba data structure.
+ * @fcfi: fcf index.
+ *
+ * This routine is invoked to unregister an FCFI from the device.
+ **/
+void
+lpfc_sli4_fcfi_unreg(struct lpfc_hba *phba, uint16_t fcfi)
+{
+       LPFC_MBOXQ_t *mbox;
+       uint32_t mbox_tmo;
+       int rc;
+       unsigned long flags;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+       if (!mbox)
+               return;
+
+       lpfc_unreg_fcfi(mbox, fcfi);
+
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       if (rc != MBX_SUCCESS)
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2517 Unregister FCFI command failed "
+                               "status %d, mbxStatus x%x\n", rc,
+                               bf_get(lpfc_mqe_status, &mbox->u.mqe));
+       else {
+               spin_lock_irqsave(&phba->hbalock, flags);
+               /* Mark the FCFI as no longer registered */
+               phba->fcf.fcf_flag &=
+                       ~(FCF_AVAILABLE | FCF_REGISTERED | FCF_DISCOVERED);
+               spin_unlock_irqrestore(&phba->hbalock, flags);
+       }
+}
+
+/**
+ * lpfc_sli4_pci_mem_setup - Setup SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to set up the PCI device memory space for device
+ * with SLI-4 interface spec.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_sli4_pci_mem_setup(struct lpfc_hba *phba)
+{
+       struct pci_dev *pdev;
+       unsigned long bar0map_len, bar1map_len, bar2map_len;
+       int error = -ENODEV;
+
+       /* Obtain PCI device reference */
+       if (!phba->pcidev)
+               return error;
+       else
+               pdev = phba->pcidev;
+
+       /* Set the device DMA mask size */
+       if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) != 0)
+               if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0)
+                       return error;
+
+       /* Get the bus address of SLI4 device Bar0, Bar1, and Bar2 and the
+        * number of bytes required by each mapping. They actually map to
+        * PCI BAR regions 1, 2, and 4 of the SLI4 device.
+        */
+       phba->pci_bar0_map = pci_resource_start(pdev, LPFC_SLI4_BAR0);
+       bar0map_len = pci_resource_len(pdev, LPFC_SLI4_BAR0);
+
+       phba->pci_bar1_map = pci_resource_start(pdev, LPFC_SLI4_BAR1);
+       bar1map_len = pci_resource_len(pdev, LPFC_SLI4_BAR1);
+
+       phba->pci_bar2_map = pci_resource_start(pdev, LPFC_SLI4_BAR2);
+       bar2map_len = pci_resource_len(pdev, LPFC_SLI4_BAR2);
+
+       /* Map SLI4 PCI Config Space Register base to a kernel virtual addr */
+       phba->sli4_hba.conf_regs_memmap_p =
+                               ioremap(phba->pci_bar0_map, bar0map_len);
+       if (!phba->sli4_hba.conf_regs_memmap_p) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "ioremap failed for SLI4 PCI config registers.\n");
+               goto out;
+       }
+
+       /* Map SLI4 HBA Control Register base to a kernel virtual address. */
+       phba->sli4_hba.ctrl_regs_memmap_p =
+                               ioremap(phba->pci_bar1_map, bar1map_len);
+       if (!phba->sli4_hba.ctrl_regs_memmap_p) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "ioremap failed for SLI4 HBA control registers.\n");
+               goto out_iounmap_conf;
+       }
+
+       /* Map SLI4 HBA Doorbell Register base to a kernel virtual address. */
+       phba->sli4_hba.drbl_regs_memmap_p =
+                               ioremap(phba->pci_bar2_map, bar2map_len);
+       if (!phba->sli4_hba.drbl_regs_memmap_p) {
+               dev_printk(KERN_ERR, &pdev->dev,
+                          "ioremap failed for SLI4 HBA doorbell registers.\n");
+               goto out_iounmap_ctrl;
+       }
+
+       /* Set up BAR0 PCI config space register memory map */
+       lpfc_sli4_bar0_register_memmap(phba);
+
+       /* Set up BAR1 register memory map */
+       lpfc_sli4_bar1_register_memmap(phba);
+
+       /* Set up BAR2 register memory map */
+       error = lpfc_sli4_bar2_register_memmap(phba, LPFC_VF0);
+       if (error)
+               goto out_iounmap_all;
+
+       return 0;
+
+out_iounmap_all:
+       iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+out_iounmap_ctrl:
+       iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+out_iounmap_conf:
+       iounmap(phba->sli4_hba.conf_regs_memmap_p);
+out:
+       return error;
+}
+
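+/*
+ * Editorial summary of the register mappings established above (restates
+ * the code, no new facts):
+ *
+ *     pci_bar0_map -> conf_regs_memmap_p  (SLI4 PCI config registers)
+ *     pci_bar1_map -> ctrl_regs_memmap_p  (HBA control registers)
+ *     pci_bar2_map -> drbl_regs_memmap_p  (HBA doorbell registers)
+ *
+ * lpfc_sli4_pci_mem_unset() below iounmap()s the three regions in the
+ * reverse order.
+ */
+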
+/**
+ * lpfc_sli4_pci_mem_unset - Unset SLI4 HBA PCI memory space.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to unset the PCI device memory space for device
+ * with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_pci_mem_unset(struct lpfc_hba *phba)
+{
+       struct pci_dev *pdev;
+
+       /* Obtain PCI device reference */
+       if (!phba->pcidev)
+               return;
+       else
+               pdev = phba->pcidev;
+
+       /* Free coherent DMA memory allocated */
+
+       /* Unmap I/O memory space */
+       iounmap(phba->sli4_hba.drbl_regs_memmap_p);
+       iounmap(phba->sli4_hba.ctrl_regs_memmap_p);
+       iounmap(phba->sli4_hba.conf_regs_memmap_p);
+
+       return;
+}
+
+/**
+ * lpfc_sli_enable_msix - Enable MSI-X interrupt mode on SLI-3 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-3 interface specs. The kernel function pci_enable_msix() is
+ * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
+ * invoked, enables either all or nothing, depending on the current
+ * availability of PCI vector resources. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that
+ * later, when the device is unloading, the driver should always call
+ * free_irq() on all MSI-X vectors it has done request_irq() on before
+ * calling pci_disable_msix(). Failure to do so results in a BUG_ON() and
+ * the device will be left with MSI-X enabled and leak its vectors.
+ *
+ * Return codes
+ *   0 - successful
+ *   other values - error
+ **/
+static int
+lpfc_sli_enable_msix(struct lpfc_hba *phba)
+{
+       int rc, i;
+       LPFC_MBOXQ_t *pmb;
+
+       /* Set up MSI-X multi-message vectors */
+       for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+               phba->msix_entries[i].entry = i;
+
+       /* Configure MSI-X capability structure */
+       rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
+                               ARRAY_SIZE(phba->msix_entries));
+       if (rc) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0420 PCI enable MSI-X failed (%d)\n", rc);
+               goto msi_fail_out;
+       }
+       for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0477 MSI-X entry[%d]: vector=x%x "
+                               "message=%d\n", i,
+                               phba->msix_entries[i].vector,
+                               phba->msix_entries[i].entry);
+       /*
+        * Assign MSI-X vectors to interrupt handlers
+        */
+
+       /* vector-0 is associated with the slow-path handler */
+       rc = request_irq(phba->msix_entries[0].vector,
+                        &lpfc_sli_sp_intr_handler, IRQF_SHARED,
+                        LPFC_SP_DRIVER_HANDLER_NAME, phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0421 MSI-X slow-path request_irq failed "
+                               "(%d)\n", rc);
+               goto msi_fail_out;
+       }
+
+       /* vector-1 is associated with the fast-path handler */
+       rc = request_irq(phba->msix_entries[1].vector,
+                        &lpfc_sli_fp_intr_handler, IRQF_SHARED,
+                        LPFC_FP_DRIVER_HANDLER_NAME, phba);
+
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0429 MSI-X fast-path request_irq failed "
+                               "(%d)\n", rc);
+               goto irq_fail_out;
+       }
+
+       /*
+        * Configure HBA MSI-X attention conditions to messages
+        */
+       pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+
+       if (!pmb) {
+               rc = -ENOMEM;
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0474 Unable to allocate memory for issuing "
+                               "MBOX_CONFIG_MSI command\n");
+               goto mem_fail_out;
+       }
+       rc = lpfc_config_msi(phba, pmb);
+       if (rc)
+               goto mbx_fail_out;
+       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+                               "0351 Config MSI mailbox command failed, "
+                               "mbxCmd x%x, mbxStatus x%x\n",
+                               pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus);
+               goto mbx_fail_out;
+       }
+
+       /* Free memory allocated for mailbox command */
+       mempool_free(pmb, phba->mbox_mem_pool);
+       return rc;
+
+mbx_fail_out:
+       /* Free memory allocated for mailbox command */
+       mempool_free(pmb, phba->mbox_mem_pool);
+
+mem_fail_out:
+       /* free the irq already requested */
+       free_irq(phba->msix_entries[1].vector, phba);
+
+irq_fail_out:
+       /* free the irq already requested */
+       free_irq(phba->msix_entries[0].vector, phba);
+
+msi_fail_out:
+       /* Unconfigure MSI-X capability structure */
+       pci_disable_msix(phba->pcidev);
+       return rc;
+}
+
+/**
+ * lpfc_sli_disable_msix - Disable MSI-X interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-3 interface spec.
+ **/
+static void
+lpfc_sli_disable_msix(struct lpfc_hba *phba)
+{
+       int i;
+
+       /* Free up MSI-X multi-message vectors */
+       for (i = 0; i < LPFC_MSIX_VECTORS; i++)
+               free_irq(phba->msix_entries[i].vector, phba);
+       /* Disable MSI-X */
+       pci_disable_msix(phba->pcidev);
+
+       return;
+}
+
+/**
+ * lpfc_sli_enable_msi - Enable MSI interrupt mode on SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The kernel function pci_enable_msi() is called to
+ * enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_sli_enable_msi(struct lpfc_hba *phba)
+{
+       int rc;
+
+       rc = pci_enable_msi(phba->pcidev);
+       if (!rc)
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0462 PCI enable MSI mode success.\n");
+       else {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0471 PCI enable MSI mode failed (%d)\n", rc);
+               return rc;
+       }
+
+       rc = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+                        IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+       if (rc) {
+               pci_disable_msi(phba->pcidev);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0478 MSI request_irq failed (%d)\n", rc);
+       }
+       return rc;
+}
+
+/**
+ * lpfc_sli_disable_msi - Disable MSI interrupt mode to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-3 interface spec. The driver calls free_irq() on the MSI vector it
+ * has done request_irq() on before calling pci_disable_msi(). Failure to
+ * do so results in a BUG_ON() and the device will be left with MSI enabled
+ * and leak its vector.
+ **/
+static void
+lpfc_sli_disable_msi(struct lpfc_hba *phba)
+{
+       free_irq(phba->pcidev->irq, phba);
+       pci_disable_msi(phba->pcidev);
+       return;
+}
+
+/**
+ * lpfc_sli_enable_intr - Enable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: configured interrupt mode (2 - MSI-X, 1 - MSI, 0 - INTx).
+ *
+ * This routine is invoked to enable device interrupt and associate the
+ * driver's interrupt handler(s) to interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode configured for the
+ * driver, the driver will try to fall back from the configured interrupt
+ * mode to an interrupt mode supported by the platform, kernel, and device,
+ * in the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *   LPFC_INTR_ERROR - failed to enable any interrupt mode
+ *   other values - the interrupt mode actually enabled (0, 1 or 2)
+ **/
+static uint32_t
+lpfc_sli_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+       uint32_t intr_mode = LPFC_INTR_ERROR;
+       int retval;
+
+       if (cfg_mode == 2) {
+               /* Need to issue conf_port mbox cmd before conf_msi mbox cmd */
+               retval = lpfc_sli_config_port(phba, LPFC_SLI_REV3);
+               if (!retval) {
+                       /* Now, try to enable MSI-X interrupt mode */
+                       retval = lpfc_sli_enable_msix(phba);
+                       if (!retval) {
+                               /* Indicate initialization to MSI-X mode */
+                               phba->intr_type = MSIX;
+                               intr_mode = 2;
+                       }
+               }
+       }
+
+       /* Fall back to MSI if MSI-X initialization failed */
+       if (cfg_mode >= 1 && phba->intr_type == NONE) {
+               retval = lpfc_sli_enable_msi(phba);
+               if (!retval) {
+                       /* Indicate initialization to MSI mode */
+                       phba->intr_type = MSI;
+                       intr_mode = 1;
+               }
+       }
+
+       /* Fall back to INTx if both MSI-X/MSI initialization failed */
+       if (phba->intr_type == NONE) {
+               retval = request_irq(phba->pcidev->irq, lpfc_sli_intr_handler,
+                                    IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+               if (!retval) {
+                       /* Indicate initialization to INTx mode */
+                       phba->intr_type = INTx;
+                       intr_mode = 0;
+               }
+       }
+       return intr_mode;
+}
+
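+/*
+ * Caller-side sketch for the interrupt enabling above (editorial; the
+ * caller shown is an assumption, not part of this patch). The return
+ * value is the interrupt mode actually enabled, or LPFC_INTR_ERROR when
+ * MSI-X, MSI and INTx all failed:
+ *
+ *     intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+ *     if (intr_mode == LPFC_INTR_ERROR)
+ *             (abort device bring-up)
+ */
+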
+/**
+ * lpfc_sli_disable_intr - Disable device interrupt to SLI-3 device.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupt and disassociate the
+ * driver's interrupt handler(s) from interrupt vector(s) to device with
+ * SLI-3 interface spec. Depending on the interrupt mode, the driver will
+ * release the interrupt vector(s) for the message signaled interrupt.
+ **/
+static void
+lpfc_sli_disable_intr(struct lpfc_hba *phba)
+{
+       /* Disable the currently initialized interrupt mode */
+       if (phba->intr_type == MSIX)
+               lpfc_sli_disable_msix(phba);
+       else if (phba->intr_type == MSI)
+               lpfc_sli_disable_msi(phba);
+       else if (phba->intr_type == INTx)
+               free_irq(phba->pcidev->irq, phba);
+
+       /* Reset interrupt management states */
+       phba->intr_type = NONE;
+       phba->sli.slistat.sli_intr = 0;
+
+       return;
+}
+
+/**
+ * lpfc_sli4_enable_msix - Enable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI-X interrupt vectors to device
+ * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
+ * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
+ * enables either all or nothing, depending on the current availability of
+ * PCI vector resources. The device driver is responsible for calling the
+ * individual request_irq() to register each MSI-X vector with an interrupt
+ * handler, which is done in this function. Note that later, when the device
+ * is unloading, the driver should always call free_irq() on all MSI-X
+ * vectors it has done request_irq() on before calling pci_disable_msix().
+ * Failure to do so results in a BUG_ON() and the device will be left with
+ * MSI-X enabled and leak its vectors.
+ *
+ * Return codes
+ * 0 - successful
+ * other values - error
+ **/
+static int
+lpfc_sli4_enable_msix(struct lpfc_hba *phba)
+{
+       int rc, index;
+
+       /* Set up MSI-X multi-message vectors */
+       for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+               phba->sli4_hba.msix_entries[index].entry = index;
+
+       /* Configure MSI-X capability structure */
+       rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
+                            phba->sli4_hba.cfg_eqn);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0484 PCI enable MSI-X failed (%d)\n", rc);
+               goto msi_fail_out;
+       }
+       /* Log MSI-X vector assignment */
+       for (index = 0; index < phba->sli4_hba.cfg_eqn; index++)
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0489 MSI-X entry[%d]: vector=x%x "
+                               "message=%d\n", index,
+                               phba->sli4_hba.msix_entries[index].vector,
+                               phba->sli4_hba.msix_entries[index].entry);
+       /*
+        * Assign MSI-X vectors to interrupt handlers
+        */
+
+       /* The first vector must be associated with the slow-path handler for MQ */
+       rc = request_irq(phba->sli4_hba.msix_entries[0].vector,
+                        &lpfc_sli4_sp_intr_handler, IRQF_SHARED,
+                        LPFC_SP_DRIVER_HANDLER_NAME, phba);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0485 MSI-X slow-path request_irq failed "
+                               "(%d)\n", rc);
+               goto msi_fail_out;
+       }
+
+       /* The rest of the vector(s) are associated with fast-path handler(s) */
+       for (index = 1; index < phba->sli4_hba.cfg_eqn; index++) {
+               phba->sli4_hba.fcp_eq_hdl[index - 1].idx = index - 1;
+               phba->sli4_hba.fcp_eq_hdl[index - 1].phba = phba;
+               rc = request_irq(phba->sli4_hba.msix_entries[index].vector,
+                                &lpfc_sli4_fp_intr_handler, IRQF_SHARED,
+                                LPFC_FP_DRIVER_HANDLER_NAME,
+                                &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                                       "0486 MSI-X fast-path (%d) "
+                                       "request_irq failed (%d)\n", index, rc);
+                       goto cfg_fail_out;
+               }
+       }
+
+       return rc;
+
+cfg_fail_out:
+       /* free the irq already requested */
+       for (--index; index >= 1; index--)
+               free_irq(phba->sli4_hba.msix_entries[index - 1].vector,
+                        &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+
+       /* free the irq already requested */
+       free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+msi_fail_out:
+       /* Unconfigure MSI-X capability structure */
+       pci_disable_msix(phba->pcidev);
+       return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msix - Disable MSI-X interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to release the MSI-X vectors and then disable the
+ * MSI-X interrupt mode to device with SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_disable_msix(struct lpfc_hba *phba)
+{
+       int index;
+
+       /* Free up MSI-X multi-message vectors */
+       free_irq(phba->sli4_hba.msix_entries[0].vector, phba);
+
+       for (index = 1; index < phba->sli4_hba.cfg_eqn; index++)
+               free_irq(phba->sli4_hba.msix_entries[index].vector,
+                        &phba->sli4_hba.fcp_eq_hdl[index - 1]);
+       /* Disable MSI-X */
+       pci_disable_msix(phba->pcidev);
+
+       return;
+}
+
+/**
+ * lpfc_sli4_enable_msi - Enable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to enable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The kernel function pci_enable_msi() is called
+ * to enable the MSI vector. The device driver is responsible for calling
+ * request_irq() to register the MSI vector with an interrupt handler,
+ * which is done in this function.
+ *
+ * Return codes
+ *     0 - successful
+ *     other values - error
+ **/
+static int
+lpfc_sli4_enable_msi(struct lpfc_hba *phba)
+{
+       int rc, index;
+
+       rc = pci_enable_msi(phba->pcidev);
+       if (!rc)
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0487 PCI enable MSI mode success.\n");
+       else {
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0488 PCI enable MSI mode failed (%d)\n", rc);
+               return rc;
+       }
+
+       rc = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+                        IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+       if (rc) {
+               pci_disable_msi(phba->pcidev);
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0490 MSI request_irq failed (%d)\n", rc);
+       }
+
+       for (index = 0; index < phba->cfg_fcp_eq_count; index++) {
+               phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+               phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+       }
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_disable_msi - Disable MSI interrupt mode to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable the MSI interrupt mode to device with
+ * SLI-4 interface spec. The driver calls free_irq() on the MSI vector it
+ * has done request_irq() on before calling pci_disable_msi(). Failure to
+ * do so results in a BUG_ON() and the device will be left with MSI enabled
+ * and leak its vector.
+ **/
+static void
+lpfc_sli4_disable_msi(struct lpfc_hba *phba)
+{
+       free_irq(phba->pcidev->irq, phba);
+       pci_disable_msi(phba->pcidev);
+       return;
+}
+
+/**
+ * lpfc_sli4_enable_intr - Enable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ * @cfg_mode: configured interrupt mode (2 - MSI-X, 1 - MSI, 0 - INTx).
+ *
+ * This routine is invoked to enable device interrupt and associate the
+ * driver's interrupt handler(s) to interrupt vector(s) to device with
+ * SLI-4 interface spec. Depending on the interrupt mode configured for
+ * the driver, the driver will try to fall back from the configured
+ * interrupt mode to an interrupt mode supported by the platform, kernel,
+ * and device, in the order of:
+ * MSI-X -> MSI -> IRQ.
+ *
+ * Return codes
+ *     LPFC_INTR_ERROR - failed to enable any interrupt mode
+ *     other values - the interrupt mode actually enabled (0, 1 or 2)
+ **/
+static uint32_t
+lpfc_sli4_enable_intr(struct lpfc_hba *phba, uint32_t cfg_mode)
+{
+       uint32_t intr_mode = LPFC_INTR_ERROR;
+       int retval, index;
+
+       if (cfg_mode == 2) {
+               /* Preparation before conf_msi mbox cmd */
+               retval = 0;
+               if (!retval) {
+                       /* Now, try to enable MSI-X interrupt mode */
+                       retval = lpfc_sli4_enable_msix(phba);
+                       if (!retval) {
+                               /* Indicate initialization to MSI-X mode */
+                               phba->intr_type = MSIX;
+                               intr_mode = 2;
+                       }
+               }
+       }
+
+       /* Fall back to MSI if MSI-X initialization failed */
+       if (cfg_mode >= 1 && phba->intr_type == NONE) {
+               retval = lpfc_sli4_enable_msi(phba);
+               if (!retval) {
+                       /* Indicate initialization to MSI mode */
+                       phba->intr_type = MSI;
+                       intr_mode = 1;
+               }
+       }
+
+       /* Fallback to INTx if both MSI-X/MSI initialization failed */
+       if (phba->intr_type == NONE) {
+               retval = request_irq(phba->pcidev->irq, lpfc_sli4_intr_handler,
+                                    IRQF_SHARED, LPFC_DRIVER_NAME, phba);
+               if (!retval) {
+                       /* Indicate initialization to INTx mode */
+                       phba->intr_type = INTx;
+                       intr_mode = 0;
+                       for (index = 0; index < phba->cfg_fcp_eq_count;
+                            index++) {
+                               phba->sli4_hba.fcp_eq_hdl[index].idx = index;
+                               phba->sli4_hba.fcp_eq_hdl[index].phba = phba;
+                       }
+               }
+       }
+       return intr_mode;
+}
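
The MSI-X -> MSI -> INTx ladder above is a common PCI driver pattern: try the richest mode the configuration allows and degrade one level on each failure. A hedged, single-vector sketch of the same ladder (try_msix() and try_msi() stand in for lpfc_sli4_enable_msix() and lpfc_sli4_enable_msi(); a real driver would also request_irq() the MSI/MSI-X vector, as lpfc does):

#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/errno.h>

/* Single-vector stand-ins for the lpfc enable helpers; any nonzero
 * return (including "fewer vectors available") counts as failure here. */
static int try_msix(struct pci_dev *pdev)
{
        struct msix_entry entry = { .entry = 0 };

        return pci_enable_msix(pdev, &entry, 1);
}

static int try_msi(struct pci_dev *pdev)
{
        return pci_enable_msi(pdev);
}

/* Returns 2 (MSI-X), 1 (MSI), 0 (INTx), or a negative errno. */
static int enable_best_intr_mode(struct pci_dev *pdev, int cfg_mode,
                                 irq_handler_t handler, void *ctx)
{
        if (cfg_mode >= 2 && !try_msix(pdev))
                return 2;
        if (cfg_mode >= 1 && !try_msi(pdev))
                return 1;
        if (!request_irq(pdev->irq, handler, IRQF_SHARED, "my_drv", ctx))
                return 0;
        return -ENODEV;
}
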
+
+/**
+ * lpfc_sli4_disable_intr - Disable device interrupt to SLI-4 device
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to disable device interrupts and disassociate
+ * the driver's interrupt handler(s) from the interrupt vector(s) of a
+ * device with the SLI-4 interface spec. Depending on the interrupt mode,
+ * the driver will release the interrupt vector(s) for the message
+ * signaled interrupt.
+ **/
+static void
+lpfc_sli4_disable_intr(struct lpfc_hba *phba)
+{
+       /* Disable the currently initialized interrupt mode */
+       if (phba->intr_type == MSIX)
+               lpfc_sli4_disable_msix(phba);
+       else if (phba->intr_type == MSI)
+               lpfc_sli4_disable_msi(phba);
+       else if (phba->intr_type == INTx)
+               free_irq(phba->pcidev->irq, phba);
+
+       /* Reset interrupt management states */
+       phba->intr_type = NONE;
+       phba->sli.slistat.sli_intr = 0;
+
+       return;
+}
+
+/**
+ * lpfc_unset_hba - Unset SLI3 hba device initialization
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to undo the HBA device initialization steps for
+ * a device with the SLI-3 interface spec.
+ **/
+static void
+lpfc_unset_hba(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport = phba->pport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+       spin_lock_irq(shost->host_lock);
+       vport->load_flag |= FC_UNLOADING;
+       spin_unlock_irq(shost->host_lock);
+
+       lpfc_stop_hba_timers(phba);
+
+       phba->pport->work_port_events = 0;
+
+       lpfc_sli_hba_down(phba);
+
+       lpfc_sli_brdrestart(phba);
+
+       lpfc_sli_disable_intr(phba);
+
+       return;
+}
+
+/**
+ * lpfc_sli4_unset_hba - Unset SLI4 hba device initialization.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to undo the HBA device initialization steps for
+ * a device with the SLI-4 interface spec.
+ **/
+static void
+lpfc_sli4_unset_hba(struct lpfc_hba *phba)
+{
+       struct lpfc_vport *vport = phba->pport;
+       struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
+
+       spin_lock_irq(shost->host_lock);
+       vport->load_flag |= FC_UNLOADING;
+       spin_unlock_irq(shost->host_lock);
+
+       phba->pport->work_port_events = 0;
+
+       lpfc_sli4_hba_down(phba);
+
+       lpfc_sli4_disable_intr(phba);
+
+       return;
+}
+
+/**
+ * lpfc_sli4_hba_unset - Unset the fcoe hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to reset the HBA's FCoE
+ * function. The caller is not required to hold any lock. This routine
+ * issues PCI function reset mailbox command to reset the FCoE function.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static void
+lpfc_sli4_hba_unset(struct lpfc_hba *phba)
+{
+       int wait_cnt = 0;
+       LPFC_MBOXQ_t *mboxq;
+
+       lpfc_stop_hba_timers(phba);
+       phba->sli4_hba.intr_enable = 0;
+
+       /*
+        * Gracefully wait out any currently outstanding asynchronous
+        * mailbox command.
+        */
+
+       /* First, block any pending async mailbox command from being posted */
+       spin_lock_irq(&phba->hbalock);
+       phba->sli.sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+       spin_unlock_irq(&phba->hbalock);
+       /* Now, try to wait it out if we can */
+       while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+               msleep(10);
+               if (++wait_cnt > LPFC_ACTIVE_MBOX_WAIT_CNT)
+                       break;
+       }
+       /* Forcefully release the outstanding mailbox command if timed out */
+       if (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+               spin_lock_irq(&phba->hbalock);
+               mboxq = phba->sli.mbox_active;
+               mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+               __lpfc_mbox_cmpl_put(phba, mboxq);
+               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+               phba->sli.mbox_active = NULL;
+               spin_unlock_irq(&phba->hbalock);
+       }
+
+       /* Tear down the queues in the HBA */
+       lpfc_sli4_queue_unset(phba);
+
+       /* Disable PCI subsystem interrupt */
+       lpfc_sli4_disable_intr(phba);
+
+       /* The kthread_stop() signal will trigger work_done one more time */
+       kthread_stop(phba->worker_thread);
+
+       /* Stop the SLI4 device port */
+       phba->pport->work_port_events = 0;
+}
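
The mailbox drain above is a bounded-poll pattern: block new submissions under the lock, sleep in small steps up to a fixed cap, then force completion if the command never finishes. A generic sketch of the pattern, assuming an illustrative context structure (none of these names are lpfc's; spin_lock_init() at setup time is omitted):

#include <linux/spinlock.h>
#include <linux/delay.h>

#define MY_BUSY         0x1     /* a command is outstanding */
#define MY_BLOCKED      0x2     /* no new commands may be posted */
#define MY_WAIT_MAX     100     /* ~1 second at 10 ms per step */

struct my_ctx {
        spinlock_t lock;
        unsigned long flags;
};

static void my_force_complete(struct my_ctx *ctx)
{
        /* Placeholder for the forced-completion path. */
}

static void my_drain_active_cmd(struct my_ctx *ctx)
{
        int wait = 0;

        spin_lock_irq(&ctx->lock);
        ctx->flags |= MY_BLOCKED;       /* stop new submissions first */
        spin_unlock_irq(&ctx->lock);

        /* Racy read, as in the original: worst case, one extra step. */
        while ((ctx->flags & MY_BUSY) && ++wait <= MY_WAIT_MAX)
                msleep(10);

        if (ctx->flags & MY_BUSY)
                my_force_complete(ctx); /* timed out: force it */
}
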
+
+/**
+ * lpfc_pci_probe_one_s3 - PCI probe func to reg SLI-3 device to PCI subsystem.
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be called to attach a device with SLI-3 interface spec
+ * to the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * presented on the PCI bus, the kernel PCI subsystem looks at PCI
+ * device-specific information of the device and driver to see whether the
+ * driver can support this kind of device. If the match is successful, the
+ * driver core
+ * invokes this routine. If this routine determines it can claim the HBA, it
+ * does all the initialization that it needs to do to handle the HBA properly.
+ *
+ * Return code
+ *     0 - driver can claim the device
+ *     negative value - driver cannot claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s3(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+       struct lpfc_hba   *phba;
+       struct lpfc_vport *vport = NULL;
+       int error;
+       uint32_t cfg_mode, intr_mode;
+
+       /* Allocate memory for HBA structure */
+       phba = lpfc_hba_alloc(pdev);
+       if (!phba)
+               return -ENOMEM;
+
+       /* Perform generic PCI device enabling operation */
+       error = lpfc_enable_pci_dev(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1401 Failed to enable pci device.\n");
+               goto out_free_phba;
+       }
+
+       /* Set up SLI API function jump table for PCI-device group-0 HBAs */
+       error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_LP);
+       if (error)
+               goto out_disable_pci_dev;
+
+       /* Set up SLI-3 specific device PCI memory space */
+       error = lpfc_sli_pci_mem_setup(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1402 Failed to set up pci memory space.\n");
+               goto out_disable_pci_dev;
+       }
+
+       /* Set up phase-1 common device driver resources */
+       error = lpfc_setup_driver_resource_phase1(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1403 Failed to set up driver resource.\n");
+               goto out_unset_pci_mem_s3;
+       }
+
+       /* Set up SLI-3 specific device driver resources */
+       error = lpfc_sli_driver_resource_setup(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1404 Failed to set up driver resource.\n");
+               goto out_unset_pci_mem_s3;
+       }
+
+       /* Initialize and populate the iocb list per host */
+       error = lpfc_init_iocb_list(phba, LPFC_IOCB_LIST_CNT);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1405 Failed to initialize iocb list.\n");
+               goto out_unset_driver_resource_s3;
+       }
+
+       /* Set up common device driver resources */
+       error = lpfc_setup_driver_resource_phase2(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1406 Failed to set up driver resource.\n");
+               goto out_free_iocb_list;
+       }
+
+       /* Create SCSI host to the physical port */
+       error = lpfc_create_shost(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1407 Failed to create scsi host.\n");
+               goto out_unset_driver_resource;
+       }
+
+       /* Configure sysfs attributes */
+       vport = phba->pport;
+       error = lpfc_alloc_sysfs_attr(vport);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1476 Failed to allocate sysfs attr\n");
+               goto out_destroy_shost;
+       }
+
+       /* Now, try to enable interrupts and bring up the device */
+       cfg_mode = phba->cfg_use_msi;
+       while (true) {
+               /* Put device to a known state before enabling interrupt */
+               lpfc_stop_port(phba);
+               /* Configure and enable interrupt */
+               intr_mode = lpfc_sli_enable_intr(phba, cfg_mode);
+               if (intr_mode == LPFC_INTR_ERROR) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0431 Failed to enable interrupt.\n");
+                       error = -ENODEV;
+                       goto out_free_sysfs_attr;
+               }
+               /* SLI-3 HBA setup */
+               if (lpfc_sli_hba_setup(phba)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "1477 Failed to set up hba\n");
+                       error = -ENODEV;
+                       goto out_remove_device;
+               }
+
+               /* Wait 50ms for the interrupts of previous mailbox commands */
+               msleep(50);
+               /* Check active interrupts on message signaled interrupts */
+               if (intr_mode == 0 ||
+                   phba->sli.slistat.sli_intr > LPFC_MSIX_VECTORS) {
+                       /* Log the current active interrupt mode */
+                       phba->intr_mode = intr_mode;
+                       lpfc_log_intr_mode(phba, intr_mode);
+                       break;
+               } else {
+                       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                                       "0447 Configure interrupt mode (%d) "
+                                       "failed active interrupt test.\n",
+                                       intr_mode);
+                       /* Disable the current interrupt mode */
+                       lpfc_sli_disable_intr(phba);
+                       /* Try next level of interrupt mode */
+                       cfg_mode = --intr_mode;
+               }
+       }
+
+       /* Perform post initialization setup */
+       lpfc_post_init_setup(phba);
+
+       /* Check if there are static vports to be created. */
+       lpfc_create_static_vport(phba);
+
+       return 0;
+
+out_remove_device:
+       lpfc_unset_hba(phba);
+out_free_sysfs_attr:
+       lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+       lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+       lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+       lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s3:
+       lpfc_sli_driver_resource_unset(phba);
+out_unset_pci_mem_s3:
+       lpfc_sli_pci_mem_unset(phba);
+out_disable_pci_dev:
+       lpfc_disable_pci_dev(phba);
+out_free_phba:
+       lpfc_hba_free(phba);
+       return error;
+}
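
The error path of this probe routine is the classic goto-unwinding ladder: each failure jumps to a label that undoes, in reverse order, only the steps that already completed. A compact sketch of the shape with hypothetical step helpers:

#include <linux/pci.h>

/* Trivial stand-ins for the real setup/teardown phases. */
static int step_a(struct pci_dev *pdev) { return 0; }
static int step_b(struct pci_dev *pdev) { return 0; }
static int step_c(struct pci_dev *pdev) { return 0; }
static void undo_a(struct pci_dev *pdev) { }
static void undo_b(struct pci_dev *pdev) { }

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *pid)
{
        int err;

        err = step_a(pdev);
        if (err)
                return err;             /* nothing to unwind yet */
        err = step_b(pdev);
        if (err)
                goto out_undo_a;
        err = step_c(pdev);
        if (err)
                goto out_undo_b;
        return 0;

out_undo_b:
        undo_b(pdev);   /* labels run in reverse order of setup */
out_undo_a:
        undo_a(pdev);
        return err;
}
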
+
+/**
+ * lpfc_pci_remove_one_s3 - PCI func to unreg SLI-3 device from PCI subsystem.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called to detach a device with the SLI-3 interface
+ * spec from the PCI subsystem. When an Emulex HBA with SLI-3 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one_s3(struct pci_dev *pdev)
+{
+       struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+       struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
+       struct lpfc_vport **vports;
+       struct lpfc_hba   *phba = vport->phba;
+       int i;
+       int bars = pci_select_bars(pdev, IORESOURCE_MEM);
+
+       spin_lock_irq(&phba->hbalock);
+       vport->load_flag |= FC_UNLOADING;
+       spin_unlock_irq(&phba->hbalock);
+
+       lpfc_free_sysfs_attr(vport);
+
+       /* Release all the vports against this physical port */
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
+                       fc_vport_terminate(vports[i]->fc_vport);
+       lpfc_destroy_vport_work_array(phba, vports);
+
+       /* Remove FC host and then SCSI host with the physical port */
+       fc_remove_host(shost);
+       scsi_remove_host(shost);
+       lpfc_cleanup(vport);
+
+       /*
+        * Bring down the SLI Layer. This step disables all interrupts,
+        * clears the rings, discards all mailbox commands, and resets
+        * the HBA.
+        */
+
+       /* HBA interrupts will be disabled after this call */
+       lpfc_sli_hba_down(phba);
+       /* The kthread_stop() signal will trigger work_done one more time */
+       kthread_stop(phba->worker_thread);
+       /* Final cleanup of txcmplq and reset the HBA */
+       lpfc_sli_brdrestart(phba);
+
+       lpfc_stop_hba_timers(phba);
+       spin_lock_irq(&phba->hbalock);
+       list_del_init(&vport->listentry);
+       spin_unlock_irq(&phba->hbalock);
+
+       lpfc_debugfs_terminate(vport);
+
+       /* Disable interrupt */
+       lpfc_sli_disable_intr(phba);
+
+       pci_set_drvdata(pdev, NULL);
+       scsi_host_put(shost);
+
+       /*
+        * Call scsi_free before mem_free since scsi bufs are released to their
+        * corresponding pools here.
+        */
+       lpfc_scsi_free(phba);
+       lpfc_mem_free_all(phba);
+
+       dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
+                         phba->hbqslimp.virt, phba->hbqslimp.phys);
+
+       /* Free resources associated with SLI2 interface */
+       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+                         phba->slim2p.virt, phba->slim2p.phys);
+
+       /* unmap adapter SLIM and Control Registers */
+       iounmap(phba->ctrl_regs_memmap_p);
+       iounmap(phba->slim_memmap_p);
+
+       lpfc_hba_free(phba);
 
-out_remove_device:
-       spin_lock_irq(shost->host_lock);
-       vport->load_flag |= FC_UNLOADING;
-       spin_unlock_irq(shost->host_lock);
-       lpfc_stop_phba_timers(phba);
-       phba->pport->work_port_events = 0;
-       lpfc_disable_intr(phba);
-       lpfc_sli_hba_down(phba);
-       lpfc_sli_brdrestart(phba);
-out_free_sysfs_attr:
-       lpfc_free_sysfs_attr(vport);
-out_destroy_port:
-       destroy_port(vport);
-out_kthread_stop:
-       kthread_stop(phba->worker_thread);
-out_free_iocbq:
-       list_for_each_entry_safe(iocbq_entry, iocbq_next,
-                                               &phba->lpfc_iocb_list, list) {
-               kfree(iocbq_entry);
-               phba->total_iocbq_bufs--;
-       }
-       lpfc_mem_free(phba);
-out_free_hbqslimp:
-       dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
-                         phba->hbqslimp.virt, phba->hbqslimp.phys);
-out_free_slim:
-       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-                         phba->slim2p.virt, phba->slim2p.phys);
-out_iounmap:
-       iounmap(phba->ctrl_regs_memmap_p);
-out_iounmap_slim:
-       iounmap(phba->slim_memmap_p);
-out_idr_remove:
-       idr_remove(&lpfc_hba_index, phba->brd_no);
-out_free_phba:
-       kfree(phba);
-out_release_regions:
        pci_release_selected_regions(pdev, bars);
-out_disable_device:
        pci_disable_device(pdev);
-out:
-       pci_set_drvdata(pdev, NULL);
-       if (shost)
-               scsi_host_put(shost);
+}
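
The two dma_free_coherent() calls above must mirror the allocations made at probe time: the device, size, CPU address, and DMA handle all have to match the original dma_alloc_coherent(). A minimal sketch of that pairing (slim_cpu and slim_phys are illustrative names, not the lpfc fields):

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static void *slim_cpu;          /* CPU-visible address from the alloc */
static dma_addr_t slim_phys;    /* bus address from the alloc */

static int my_alloc_slim(struct pci_dev *pdev, size_t size)
{
        slim_cpu = dma_alloc_coherent(&pdev->dev, size, &slim_phys,
                                      GFP_KERNEL);
        return slim_cpu ? 0 : -ENOMEM;
}

static void my_free_slim(struct pci_dev *pdev, size_t size)
{
        /* All four arguments must mirror the matching alloc. */
        dma_free_coherent(&pdev->dev, size, slim_cpu, slim_phys);
}
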
+
+/**
+ * lpfc_pci_suspend_one_s3 - PCI func to suspend SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) on a device with the SLI-3 interface spec.
+ * When PM invokes this method, it quiesces the device by stopping the
+ * driver's worker thread for the device, turning off the device's interrupt
+ * and DMA, and bringing the device offline. Note that the driver implements
+ * only the minimum PM requirements for a power-aware driver: all possible
+ * PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method
+ * are treated as SUSPEND, and the driver fully reinitializes its device
+ * during the resume() method call. For that reason the driver sets the
+ * device to the PCI_D3hot state in PCI config space instead of setting it
+ * according to the @msg provided by the PM core.
+ *
+ * Return code
+ *     0 - driver suspended the device
+ *     Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one_s3(struct pci_dev *pdev, pm_message_t msg)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "0473 PCI device Power Management suspend.\n");
+
+       /* Bring down the device */
+       lpfc_offline_prep(phba);
+       lpfc_offline(phba);
+       kthread_stop(phba->worker_thread);
+
+       /* Disable interrupt from device */
+       lpfc_sli_disable_intr(phba);
+
+       /* Save device state to PCI config space */
+       pci_save_state(pdev);
+       pci_set_power_state(pdev, PCI_D3hot);
+
+       return 0;
+}
+
+/**
+ * lpfc_pci_resume_one_s3 - PCI func to resume SLI-3 device for power mgmnt
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be called from the kernel's PCI subsystem to support
+ * system Power Management (PM) on a device with the SLI-3 interface spec.
+ * When PM invokes this method, it restores the device's PCI config space
+ * state and fully reinitializes the device and brings it online. Note that
+ * the driver implements only the minimum PM requirements for a power-aware
+ * driver: all possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to
+ * the suspend() method are treated as SUSPEND, and the driver fully
+ * reinitializes its device during the resume() method call. For that reason
+ * the device is set to PCI_D0 directly in PCI config space before its state
+ * is restored.
+ *
+ * Return code
+ *     0 - driver resumed the device
+ *     Error otherwise
+ **/
+static int
+lpfc_pci_resume_one_s3(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       uint32_t intr_mode;
+       int error;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "0452 PCI device Power Management resume.\n");
+
+       /* Restore device state from PCI config space */
+       pci_set_power_state(pdev, PCI_D0);
+       pci_restore_state(pdev);
+       if (pdev->is_busmaster)
+               pci_set_master(pdev);
+
+       /* Startup the kernel thread for this host adapter. */
+       phba->worker_thread = kthread_run(lpfc_do_work, phba,
+                                       "lpfc_worker_%d", phba->brd_no);
+       if (IS_ERR(phba->worker_thread)) {
+               error = PTR_ERR(phba->worker_thread);
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0434 PM resume failed to start worker "
+                               "thread: error=x%x.\n", error);
+               return error;
+       }
+
+       /* Configure and enable interrupt */
+       intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+       if (intr_mode == LPFC_INTR_ERROR) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0430 PM resume Failed to enable interrupt\n");
+               return -EIO;
+       } else
+               phba->intr_mode = intr_mode;
+
+       /* Restart HBA and bring it online */
+       lpfc_sli_brdrestart(phba);
+       lpfc_online(phba);
+
+       /* Log the current active interrupt mode */
+       lpfc_log_intr_mode(phba, phba->intr_mode);
+
+       return 0;
+}
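
Taken together, the suspend/resume pair above follows the canonical PCI power-management sequence: quiesce the device, save config space, drop to D3hot; on resume, return to D0, restore config space, re-enable bus mastering, and fully reinitialize. A generic sketch under those assumptions (quiesce_hw() and restart_hw() are placeholders for the driver-specific work):

#include <linux/pci.h>

static void quiesce_hw(struct pci_dev *pdev) { }  /* stop threads/IRQ/DMA */
static int restart_hw(struct pci_dev *pdev) { return 0; }

static int my_suspend(struct pci_dev *pdev, pm_message_t msg)
{
        quiesce_hw(pdev);                       /* device must be idle first */
        pci_save_state(pdev);                   /* snapshot config space */
        pci_set_power_state(pdev, PCI_D3hot);   /* always D3hot, ignore msg */
        return 0;
}

static int my_resume(struct pci_dev *pdev)
{
        pci_set_power_state(pdev, PCI_D0);      /* power up before touching it */
        pci_restore_state(pdev);
        if (pdev->is_busmaster)
                pci_set_master(pdev);           /* restore bus mastering */
        return restart_hw(pdev);                /* full reinitialization */
}
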
+
+/**
+ * lpfc_io_error_detected_s3 - Method for handling SLI-3 device PCI I/O error
+ * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
+ *
+ * This routine is called from the PCI subsystem for I/O error handling on
+ * a device with the SLI-3 interface spec. This function is called by the PCI
+ * subsystem after a PCI bus error affecting this device has been detected.
+ * When this function is invoked, it will need to stop all the I/Os and
+ * interrupt(s) to the device. Once that is done, it will return
+ * PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to perform proper recovery
+ * as desired.
+ *
+ * Return codes
+ *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected_s3(struct pci_dev *pdev, pci_channel_state_t state)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_sli_ring  *pring;
+
+       if (state == pci_channel_io_perm_failure) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0472 PCI channel I/O permanent failure\n");
+               /* Block all SCSI devices' I/Os on the host */
+               lpfc_scsi_dev_block(phba);
+               /* Clean up all driver's outstanding SCSI I/Os */
+               lpfc_sli_flush_fcp_rings(phba);
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_disable_device(pdev);
+       /*
+        * There may be I/Os dropped by the firmware.
+        * Fail the iocbs (I/Os) on the txcmplq and let the SCSI layer
+        * retry them after the link is re-established.
+        */
+       pring = &psli->ring[psli->fcp_ring];
+       lpfc_sli_abort_iocb_ring(phba, pring);
+
+       /* Disable interrupt */
+       lpfc_sli_disable_intr(phba);
+
+       /* Request a slot reset. */
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * lpfc_io_slot_reset_s3 - Method for restarting PCI SLI-3 device from scratch.
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-3 interface spec. It is called after the PCI bus has
+ * been reset to restart the PCI card from scratch, as if from a cold boot.
+ * During the PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method
+ * to recover the device. This function will initialize the HBA device,
+ * enable the interrupt, but it will just put the HBA to offline state
+ * without passing any I/O traffic.
+ *
+ * Return codes
+ *     PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s3(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       struct lpfc_sli *psli = &phba->sli;
+       uint32_t intr_mode;
+
+       dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
+       if (pci_enable_device_mem(pdev)) {
+               printk(KERN_ERR "lpfc: Cannot re-enable "
+                       "PCI device after reset.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       }
+
+       pci_restore_state(pdev);
+       if (pdev->is_busmaster)
+               pci_set_master(pdev);
+
+       spin_lock_irq(&phba->hbalock);
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Configure and enable interrupt */
+       intr_mode = lpfc_sli_enable_intr(phba, phba->intr_mode);
+       if (intr_mode == LPFC_INTR_ERROR) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0427 Cannot re-enable interrupt after "
+                               "slot reset.\n");
+               return PCI_ERS_RESULT_DISCONNECT;
+       } else
+               phba->intr_mode = intr_mode;
+
+       /* Take device offline; this will perform cleanup */
+       lpfc_offline(phba);
+       lpfc_sli_brdrestart(phba);
+
+       /* Log the current active interrupt mode */
+       lpfc_log_intr_mode(phba, phba->intr_mode);
+
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s3 - Method for resuming PCI I/O operation on SLI-3 device.
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-3 interface spec. It is called when kernel error
+ * recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ */
+static void
+lpfc_io_resume_s3(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+
+       lpfc_online(phba);
+}
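
These three methods implement the PCI error-recovery contract: the core calls .error_detected first, performs a bus reset when PCI_ERS_RESULT_NEED_RESET is returned, then calls .slot_reset and finally .resume. A minimal sketch of the registration structure with placeholder handlers (lpfc itself registers the SLI-agnostic dispatchers defined later in this patch):

#include <linux/pci.h>

static pci_ers_result_t my_error_detected(struct pci_dev *pdev,
                                          pci_channel_state_t state)
{
        return PCI_ERS_RESULT_NEED_RESET;       /* step 1: ask for a reset */
}

static pci_ers_result_t my_slot_reset(struct pci_dev *pdev)
{
        return PCI_ERS_RESULT_RECOVERED;        /* step 2: after bus reset */
}

static void my_resume(struct pci_dev *pdev)
{
        /* step 3: traffic may flow again */
}

static struct pci_error_handlers my_err_handler = {
        .error_detected = my_error_detected,
        .slot_reset     = my_slot_reset,
        .resume         = my_resume,
};
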
+
+/**
+ * lpfc_sli4_get_els_iocb_cnt - Calculate the # of ELS IOCBs to reserve
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * returns the number of ELS/CT IOCBs to reserve
+ **/
+int
+lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *phba)
+{
+       int max_xri = phba->sli4_hba.max_cfg_param.max_xri;
+
+       if (max_xri <= 100)
+               return 4;
+       else if (max_xri <= 256)
+               return 8;
+       else if (max_xri <= 512)
+               return 16;
+       else if (max_xri <= 1024)
+               return 32;
+       else
+               return 48;
+}
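
For example, a port whose configuration reports max_xri = 300 falls into the <= 512 tier, so this helper reserves 16 ELS/CT IOCBs.
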
+
+/**
+ * lpfc_pci_probe_one_s4 - PCI probe func to reg SLI-4 device to PCI subsys
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is called from the kernel's PCI subsystem to attach a device
+ * with the SLI-4 interface spec. When an Emulex HBA with SLI-4 interface
+ * spec is presented on the PCI bus, the kernel PCI subsystem looks at PCI
+ * device-specific information of the device and driver to see whether the
+ * driver can support this kind of device. If the match is successful, the driver
+ * core invokes this routine. If this routine determines it can claim the HBA,
+ * it does all the initialization that it needs to do to handle the HBA
+ * properly.
+ *
+ * Return code
+ *     0 - driver can claim the device
+ *     negative value - driver cannot claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one_s4(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+       struct lpfc_hba   *phba;
+       struct lpfc_vport *vport = NULL;
+       int error;
+       uint32_t cfg_mode, intr_mode;
+       int mcnt;
+
+       /* Allocate memory for HBA structure */
+       phba = lpfc_hba_alloc(pdev);
+       if (!phba)
+               return -ENOMEM;
+
+       /* Perform generic PCI device enabling operation */
+       error = lpfc_enable_pci_dev(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1409 Failed to enable pci device.\n");
+               goto out_free_phba;
+       }
+
+       /* Set up SLI API function jump table for PCI-device group-1 HBAs */
+       error = lpfc_api_table_setup(phba, LPFC_PCI_DEV_OC);
+       if (error)
+               goto out_disable_pci_dev;
+
+       /* Set up SLI-4 specific device PCI memory space */
+       error = lpfc_sli4_pci_mem_setup(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1410 Failed to set up pci memory space.\n");
+               goto out_disable_pci_dev;
+       }
+
+       /* Set up phase-1 common device driver resources */
+       error = lpfc_setup_driver_resource_phase1(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1411 Failed to set up driver resource.\n");
+               goto out_unset_pci_mem_s4;
+       }
+
+       /* Set up SLI-4 Specific device driver resources */
+       error = lpfc_sli4_driver_resource_setup(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1412 Failed to set up driver resource.\n");
+               goto out_unset_pci_mem_s4;
+       }
+
+       /* Initialize and populate the iocb list per host */
+       error = lpfc_init_iocb_list(phba,
+                       phba->sli4_hba.max_cfg_param.max_xri);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1413 Failed to initialize iocb list.\n");
+               goto out_unset_driver_resource_s4;
+       }
+
+       /* Set up common device driver resources */
+       error = lpfc_setup_driver_resource_phase2(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1414 Failed to set up driver resource.\n");
+               goto out_free_iocb_list;
+       }
+
+       /* Create SCSI host to the physical port */
+       error = lpfc_create_shost(phba);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1415 Failed to create scsi host.\n");
+               goto out_unset_driver_resource;
+       }
+
+       /* Configure sysfs attributes */
+       vport = phba->pport;
+       error = lpfc_alloc_sysfs_attr(vport);
+       if (error) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1416 Failed to allocate sysfs attr\n");
+               goto out_destroy_shost;
+       }
+
+       /* Now, try to enable interrupts and bring up the device */
+       cfg_mode = phba->cfg_use_msi;
+       while (true) {
+               /* Put device to a known state before enabling interrupt */
+               lpfc_stop_port(phba);
+               /* Configure and enable interrupt */
+               intr_mode = lpfc_sli4_enable_intr(phba, cfg_mode);
+               if (intr_mode == LPFC_INTR_ERROR) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "0426 Failed to enable interrupt.\n");
+                       error = -ENODEV;
+                       goto out_free_sysfs_attr;
+               }
+               /* Set up SLI-4 HBA */
+               if (lpfc_sli4_hba_setup(phba)) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "1421 Failed to set up hba\n");
+                       error = -ENODEV;
+                       goto out_disable_intr;
+               }
+
+               /* Send NOP mbx cmds for non-INTx mode active interrupt test */
+               if (intr_mode != 0)
+                       mcnt = lpfc_sli4_send_nop_mbox_cmds(phba,
+                                                           LPFC_ACT_INTR_CNT);
+
+               /* Check active interrupts received only for MSI/MSI-X */
+               if (intr_mode == 0 ||
+                   phba->sli.slistat.sli_intr >= LPFC_ACT_INTR_CNT) {
+                       /* Log the current active interrupt mode */
+                       phba->intr_mode = intr_mode;
+                       lpfc_log_intr_mode(phba, intr_mode);
+                       break;
+               }
+               lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                               "0451 Configure interrupt mode (%d) "
+                               "failed active interrupt test.\n",
+                               intr_mode);
+               /* Unset the previous SLI-4 HBA setup */
+               lpfc_sli4_unset_hba(phba);
+               /* Try next level of interrupt mode */
+               cfg_mode = --intr_mode;
+       }
+
+       /* Perform post initialization setup */
+       lpfc_post_init_setup(phba);
+
+       return 0;
+
+out_disable_intr:
+       lpfc_sli4_disable_intr(phba);
+out_free_sysfs_attr:
+       lpfc_free_sysfs_attr(vport);
+out_destroy_shost:
+       lpfc_destroy_shost(phba);
+out_unset_driver_resource:
+       lpfc_unset_driver_resource_phase2(phba);
+out_free_iocb_list:
+       lpfc_free_iocb_list(phba);
+out_unset_driver_resource_s4:
+       lpfc_sli4_driver_resource_unset(phba);
+out_unset_pci_mem_s4:
+       lpfc_sli4_pci_mem_unset(phba);
+out_disable_pci_dev:
+       lpfc_disable_pci_dev(phba);
+out_free_phba:
+       lpfc_hba_free(phba);
        return error;
 }
 
 /**
- * lpfc_pci_remove_one - lpfc PCI func to unregister device from PCI subsystem
+ * lpfc_pci_remove_one_s4 - PCI func to unreg SLI-4 device from PCI subsystem
  * @pdev: pointer to PCI device
  *
- * This routine is to be registered to the kernel's PCI subsystem. When an
- * Emulex HBA is removed from PCI bus, it performs all the necessary cleanup
- * for the HBA device to be removed from the PCI subsystem properly.
+ * This routine is called from the kernel's PCI subsystem to device with
+ * SLI-4 interface spec. When an Emulex HBA with SLI-4 interface spec is
+ * removed from PCI bus, it performs all the necessary cleanup for the HBA
+ * device to be removed from the PCI subsystem properly.
  **/
 static void __devexit
-lpfc_pci_remove_one(struct pci_dev *pdev)
+lpfc_pci_remove_one_s4(struct pci_dev *pdev)
 {
-       struct Scsi_Host  *shost = pci_get_drvdata(pdev);
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_vport **vports;
-       struct lpfc_hba   *phba = vport->phba;
+       struct lpfc_hba *phba = vport->phba;
        int i;
-       int bars = pci_select_bars(pdev, IORESOURCE_MEM);
 
+       /* Set the device unloading flag */
        spin_lock_irq(&phba->hbalock);
        vport->load_flag |= FC_UNLOADING;
        spin_unlock_irq(&phba->hbalock);
 
+       /* Free the HBA sysfs attributes */
        lpfc_free_sysfs_attr(vport);
 
        /* Release all the vports against this physical port */
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for (i = 1; i <= phba->max_vpi && vports[i] != NULL; i++)
+               for (i = 1; i <= phba->max_vports && vports[i] != NULL; i++)
                        fc_vport_terminate(vports[i]->fc_vport);
        lpfc_destroy_vport_work_array(phba, vports);
 
        /* Remove FC host and then SCSI host with the physical port */
        fc_remove_host(shost);
        scsi_remove_host(shost);
+
+       /* Perform cleanup on the physical port */
        lpfc_cleanup(vport);
 
        /*
-        * Bring down the SLI Layer. This step disable all interrupts,
+        * Bring down the SLI Layer. This step disables all interrupts,
         * clears the rings, discards all mailbox commands, and resets
-        * the HBA.
+        * the HBA FCoE function.
         */
+       lpfc_debugfs_terminate(vport);
+       lpfc_sli4_hba_unset(phba);
 
-       /* HBA interrupt will be diabled after this call */
-       lpfc_sli_hba_down(phba);
-       /* Stop kthread signal shall trigger work_done one more time */
-       kthread_stop(phba->worker_thread);
-       /* Final cleanup of txcmplq and reset the HBA */
-       lpfc_sli_brdrestart(phba);
-
-       lpfc_stop_phba_timers(phba);
        spin_lock_irq(&phba->hbalock);
        list_del_init(&vport->listentry);
        spin_unlock_irq(&phba->hbalock);
 
-       lpfc_debugfs_terminate(vport);
-
-       /* Disable interrupt */
-       lpfc_disable_intr(phba);
-
-       pci_set_drvdata(pdev, NULL);
-       scsi_host_put(shost);
-
-       /*
-        * Call scsi_free before mem_free since scsi bufs are released to their
-        * corresponding pools here.
+       /* Call scsi_free before lpfc_sli4_driver_resource_unset since scsi
+        * buffers are released to their corresponding pools here.
         */
        lpfc_scsi_free(phba);
-       lpfc_mem_free(phba);
-
-       dma_free_coherent(&pdev->dev, lpfc_sli_hbq_size(),
-                         phba->hbqslimp.virt, phba->hbqslimp.phys);
-
-       /* Free resources associated with SLI2 interface */
-       dma_free_coherent(&pdev->dev, SLI2_SLIM_SIZE,
-                         phba->slim2p.virt, phba->slim2p.phys);
+       lpfc_sli4_driver_resource_unset(phba);
 
-       /* unmap adapter SLIM and Control Registers */
-       iounmap(phba->ctrl_regs_memmap_p);
-       iounmap(phba->slim_memmap_p);
+       /* Unmap adapter Control and Doorbell registers */
+       lpfc_sli4_pci_mem_unset(phba);
 
-       idr_remove(&lpfc_hba_index, phba->brd_no);
+       /* Release PCI resources and disable device's PCI function */
+       scsi_host_put(shost);
+       lpfc_disable_pci_dev(phba);
 
-       kfree(phba);
+       /* Finally, free the driver's device data structure */
+       lpfc_hba_free(phba);
 
-       pci_release_selected_regions(pdev, bars);
-       pci_disable_device(pdev);
+       return;
 }
 
 /**
- * lpfc_pci_suspend_one - lpfc PCI func to suspend device for power management
+ * lpfc_pci_suspend_one_s4 - PCI func to suspend SLI-4 device for power mgmnt
  * @pdev: pointer to PCI device
  * @msg: power management message
  *
- * This routine is to be registered to the kernel's PCI subsystem to support
- * system Power Management (PM). When PM invokes this method, it quiesces the
- * device by stopping the driver's worker thread for the device, turning off
- * device's interrupt and DMA, and bring the device offline. Note that as the
- * driver implements the minimum PM requirements to a power-aware driver's PM
- * support for suspend/resume -- all the possible PM messages (SUSPEND,
- * HIBERNATE, FREEZE) to the suspend() method call will be treated as SUSPEND
- * and the driver will fully reinitialize its device during resume() method
- * call, the driver will set device to PCI_D3hot state in PCI config space
- * instead of setting it according to the @msg provided by the PM.
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) on a device with the SLI-4 interface spec. When PM
+ * invokes this method, it quiesces the device by stopping the driver's
+ * worker thread for the device, turning off the device's interrupt and DMA,
+ * and bringing the device offline. Note that the driver implements only the
+ * minimum PM requirements for a power-aware driver: all possible PM messages
+ * (SUSPEND, HIBERNATE, FREEZE) passed to the suspend() method are treated as
+ * SUSPEND, and the driver fully reinitializes its device during the resume()
+ * method call. For that reason the driver sets the device to the PCI_D3hot
+ * state in PCI config space instead of setting it according to the @msg
+ * provided by the PM core.
  *
  * Return code
- *   0 - driver suspended the device
- *   Error otherwise
+ *     0 - driver suspended the device
+ *     Error otherwise
  **/
 static int
-lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+lpfc_pci_suspend_one_s4(struct pci_dev *pdev, pm_message_t msg)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "0473 PCI device Power Management suspend.\n");
+                       "0298 PCI device Power Management suspend.\n");
 
        /* Bring down the device */
        lpfc_offline_prep(phba);
@@ -3194,7 +7476,7 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
        kthread_stop(phba->worker_thread);
 
        /* Disable interrupt from device */
-       lpfc_disable_intr(phba);
+       lpfc_sli4_disable_intr(phba);
 
        /* Save device state to PCI config space */
        pci_save_state(pdev);
@@ -3204,25 +7486,26 @@ lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
 }
 
 /**
- * lpfc_pci_resume_one - lpfc PCI func to resume device for power management
+ * lpfc_pci_resume_one_s4 - PCI func to resume SLI-4 device for power mgmnt
  * @pdev: pointer to PCI device
  *
- * This routine is to be registered to the kernel's PCI subsystem to support
- * system Power Management (PM). When PM invokes this method, it restores
- * the device's PCI config space state and fully reinitializes the device
- * and brings it online. Note that as the driver implements the minimum PM
- * requirements to a power-aware driver's PM for suspend/resume -- all
- * the possible PM messages (SUSPEND, HIBERNATE, FREEZE) to the suspend()
- * method call will be treated as SUSPEND and the driver will fully
- * reinitialize its device during resume() method call, the device will be
- * set to PCI_D0 directly in PCI config space before restoring the state.
+ * This routine is called from the kernel's PCI subsystem to support system
+ * Power Management (PM) on a device with the SLI-4 interface spec. When PM
+ * invokes this method, it restores the device's PCI config space state and
+ * fully reinitializes the device and brings it online. Note that the driver
+ * implements only the minimum PM requirements for a power-aware driver: all
+ * possible PM messages (SUSPEND, HIBERNATE, FREEZE) passed to the suspend()
+ * method are treated as SUSPEND, and the driver fully reinitializes its
+ * device during the resume() method call. For that reason the device is set
+ * to PCI_D0 directly in PCI config space before its state is restored.
  *
  * Return code
- *   0 - driver suspended the device
- *   Error otherwise
+ *     0 - driver resumed the device
+ *     Error otherwise
  **/
 static int
-lpfc_pci_resume_one(struct pci_dev *pdev)
+lpfc_pci_resume_one_s4(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
@@ -3230,7 +7513,7 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
        int error;
 
        lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
-                       "0452 PCI device Power Management resume.\n");
+                       "0292 PCI device Power Management resume.\n");
 
        /* Restore device state from PCI config space */
        pci_set_power_state(pdev, PCI_D0);
@@ -3238,22 +7521,22 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
        if (pdev->is_busmaster)
                pci_set_master(pdev);
 
-       /* Startup the kernel thread for this host adapter. */
+       /* Startup the kernel thread for this host adapter. */
        phba->worker_thread = kthread_run(lpfc_do_work, phba,
                                        "lpfc_worker_%d", phba->brd_no);
        if (IS_ERR(phba->worker_thread)) {
                error = PTR_ERR(phba->worker_thread);
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0434 PM resume failed to start worker "
+                               "0293 PM resume failed to start worker "
                                "thread: error=x%x.\n", error);
                return error;
        }
 
        /* Configure and enable interrupt */
-       intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
+       intr_mode = lpfc_sli4_enable_intr(phba, phba->intr_mode);
        if (intr_mode == LPFC_INTR_ERROR) {
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0430 PM resume Failed to enable interrupt\n");
+                               "0294 PM resume Failed to enable interrupt\n");
                return -EIO;
        } else
                phba->intr_mode = intr_mode;
@@ -3269,129 +7552,316 @@ lpfc_pci_resume_one(struct pci_dev *pdev)
 }
 
 /**
- * lpfc_io_error_detected - Driver method for handling PCI I/O error detected
+ * lpfc_io_error_detected_s4 - Method for handling PCI I/O error to SLI-4 device
  * @pdev: pointer to PCI device.
  * @state: the current PCI connection state.
  *
- * This routine is registered to the PCI subsystem for error handling. This
- * function is called by the PCI subsystem after a PCI bus error affecting
- * this device has been detected. When this function is invoked, it will
- * need to stop all the I/Os and interrupt(s) to the device. Once that is
- * done, it will return PCI_ERS_RESULT_NEED_RESET for the PCI subsystem to
- * perform proper recovery as desired.
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-4 interface spec. This function is called by the PCI
+ * subsystem
+ * after a PCI bus error affecting this device has been detected. When this
+ * function is invoked, it will need to stop all the I/Os and interrupt(s)
+ * to the device. Once that is done, it will return PCI_ERS_RESULT_NEED_RESET
+ * for the PCI subsystem to perform proper recovery as desired.
  *
  * Return codes
- *   PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
- *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
  **/
-static pci_ers_result_t lpfc_io_error_detected(struct pci_dev *pdev,
-                               pci_channel_state_t state)
+static pci_ers_result_t
+lpfc_io_error_detected_s4(struct pci_dev *pdev, pci_channel_state_t state)
+{
+       return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * lpfc_io_slot_reset_s4 - Method for restart PCI SLI-4 device from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-4 interface spec. It is called after the PCI bus has
+ * been reset to
+ * restart the PCI card from scratch, as if from a cold-boot. During the
+ * PCI subsystem error recovery, after the driver returns
+ * PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform proper error
+ * recovery and then call this routine before calling the .resume method to
+ * recover the device. This function will initialize the HBA device, enable
+ * the interrupt, but it will just put the HBA to offline state without
+ * passing any I/O traffic.
+ *
+ * Return codes
+ *     PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ */
+static pci_ers_result_t
+lpfc_io_slot_reset_s4(struct pci_dev *pdev)
+{
+       return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * lpfc_io_resume_s4 - Method for resuming PCI I/O operation to SLI-4 device
+ * @pdev: pointer to PCI device
+ *
+ * This routine is called from the PCI subsystem for error handling on a
+ * device with the SLI-4 interface spec. It is called when kernel error
+ * recovery tells
+ * the lpfc driver that it is ok to resume normal PCI operation after PCI bus
+ * error recovery. After this call, traffic can start to flow from this device
+ * again.
+ **/
+static void
+lpfc_io_resume_s4(struct pci_dev *pdev)
+{
+       return;
+}
+
+/**
+ * lpfc_pci_probe_one - lpfc PCI probe func to reg dev to PCI subsystem
+ * @pdev: pointer to PCI device
+ * @pid: pointer to PCI device identifier
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA device is presented on the PCI bus, the kernel PCI subsystem
+ * looks at PCI device-specific information of the device and driver to see
+ * whether the driver can support this kind of device. If the match is
+ * successful, the driver core invokes this routine. This routine dispatches
+ * the action to the proper SLI-3 or SLI-4 device probing routine, which will
+ * do all the initialization that it needs to do to handle the HBA device
+ * properly.
+ *
+ * Return code
+ *     0 - driver can claim the device
+ *     negative value - driver cannot claim the device
+ **/
+static int __devinit
+lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
+{
+       int rc;
+       uint16_t dev_id;
+
+       if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
+               return -ENODEV;
+
+       switch (dev_id) {
+       case PCI_DEVICE_ID_TIGERSHARK:
+       case PCI_DEVICE_ID_TIGERSHARK_S:
+               rc = lpfc_pci_probe_one_s4(pdev, pid);
+               break;
+       default:
+               rc = lpfc_pci_probe_one_s3(pdev, pid);
+               break;
+       }
+       return rc;
+}
+
+/**
+ * lpfc_pci_remove_one - lpfc PCI func to unreg dev from PCI subsystem
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem. When an
+ * Emulex HBA is removed from PCI bus, the driver core invokes this routine.
+ * This routine dispatches the action to the proper SLI-3 or SLI-4 device
+ * remove routine, which will perform all the necessary cleanup for the
+ * device to be removed from the PCI subsystem properly.
+ **/
+static void __devexit
+lpfc_pci_remove_one(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_sli_ring  *pring;
 
-       if (state == pci_channel_io_perm_failure) {
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               lpfc_pci_remove_one_s3(pdev);
+               break;
+       case LPFC_PCI_DEV_OC:
+               lpfc_pci_remove_one_s4(pdev);
+               break;
+       default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0472 PCI channel I/O permanent failure\n");
-               /* Block all SCSI devices' I/Os on the host */
-               lpfc_scsi_dev_block(phba);
-               /* Clean up all driver's outstanding SCSI I/Os */
-               lpfc_sli_flush_fcp_rings(phba);
-               return PCI_ERS_RESULT_DISCONNECT;
+                               "1424 Invalid PCI device group: 0x%x\n",
+                               phba->pci_dev_grp);
+               break;
        }
+       return;
+}
 
-       pci_disable_device(pdev);
-       /*
-        * There may be I/Os dropped by the firmware.
-        * Error iocb (I/O) on txcmplq and let the SCSI layer
-        * retry it after re-establishing link.
-        */
-       pring = &psli->ring[psli->fcp_ring];
-       lpfc_sli_abort_iocb_ring(phba, pring);
+/**
+ * lpfc_pci_suspend_one - lpfc PCI func to suspend dev for power management
+ * @pdev: pointer to PCI device
+ * @msg: power management message
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device suspend routine, which will
+ * suspend the device.
+ *
+ * Return code
+ *     0 - driver suspended the device
+ *     Error otherwise
+ **/
+static int
+lpfc_pci_suspend_one(struct pci_dev *pdev, pm_message_t msg)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       int rc = -ENODEV;
 
-       /* Disable interrupt */
-       lpfc_disable_intr(phba);
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               rc = lpfc_pci_suspend_one_s3(pdev, msg);
+               break;
+       case LPFC_PCI_DEV_OC:
+               rc = lpfc_pci_suspend_one_s4(pdev, msg);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1425 Invalid PCI device group: 0x%x\n",
+                               phba->pci_dev_grp);
+               break;
+       }
+       return rc;
+}
 
-       /* Request a slot reset. */
-       return PCI_ERS_RESULT_NEED_RESET;
+/**
+ * lpfc_pci_resume_one - lpfc PCI func to resume dev for power management
+ * @pdev: pointer to PCI device
+ *
+ * This routine is to be registered to the kernel's PCI subsystem to support
+ * system Power Management (PM). When PM invokes this method, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device resume routine, which will
+ * resume the device.
+ *
+ * Return code
+ *     0 - driver resumed the device
+ *     Error otherwise
+ **/
+static int
+lpfc_pci_resume_one(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       int rc = -ENODEV;
+
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               rc = lpfc_pci_resume_one_s3(pdev);
+               break;
+       case LPFC_PCI_DEV_OC:
+               rc = lpfc_pci_resume_one_s4(pdev);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1426 Invalid PCI device group: 0x%x\n",
+                               phba->pci_dev_grp);
+               break;
+       }
+       return rc;
 }
 
 /**
- * lpfc_io_slot_reset - Restart a PCI device from scratch
+ * lpfc_io_error_detected - lpfc method for handling PCI I/O error
  * @pdev: pointer to PCI device.
+ * @state: the current PCI connection state.
  *
- * This routine is registered to the PCI subsystem for error handling. This is
- * called after PCI bus has been reset to restart the PCI card from scratch,
- * as if from a cold-boot. During the PCI subsystem error recovery, after the
- * driver returns PCI_ERS_RESULT_NEED_RESET, the PCI subsystem will perform
- * proper error recovery and then call this routine before calling the .resume
- * method to recover the device. This function will initialize the HBA device,
- * enable the interrupt, but it will just put the HBA to offline state without
- * passing any I/O traffic.
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called by the PCI subsystem after a PCI bus error affecting
+ * this device has been detected. When this routine is invoked, it dispatches
+ * the action to the proper SLI-3 or SLI-4 device error detected handling
+ * routine, which will perform the proper error detected operation.
  *
  * Return codes
- *   PCI_ERS_RESULT_RECOVERED - the device has been recovered
- *   PCI_ERS_RESULT_DISCONNECT - device could not be recovered
- */
-static pci_ers_result_t lpfc_io_slot_reset(struct pci_dev *pdev)
+ *     PCI_ERS_RESULT_NEED_RESET - need to reset before recovery
+ *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_error_detected(struct pci_dev *pdev, pci_channel_state_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
-       struct lpfc_sli *psli = &phba->sli;
-       uint32_t intr_mode;
+       pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
 
-       dev_printk(KERN_INFO, &pdev->dev, "recovering from a slot reset.\n");
-       if (pci_enable_device_mem(pdev)) {
-               printk(KERN_ERR "lpfc: Cannot re-enable "
-                       "PCI device after reset.\n");
-               return PCI_ERS_RESULT_DISCONNECT;
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               rc = lpfc_io_error_detected_s3(pdev, state);
+               break;
+       case LPFC_PCI_DEV_OC:
+               rc = lpfc_io_error_detected_s4(pdev, state);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1427 Invalid PCI device group: 0x%x\n",
+                               phba->pci_dev_grp);
+               break;
        }
+       return rc;
+}
 
-       pci_restore_state(pdev);
-       if (pdev->is_busmaster)
-               pci_set_master(pdev);
-
-       spin_lock_irq(&phba->hbalock);
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
-       spin_unlock_irq(&phba->hbalock);
+/**
+ * lpfc_io_slot_reset - lpfc method for restart PCI dev from scratch
+ * @pdev: pointer to PCI device.
+ *
+ * This routine is registered to the PCI subsystem for error handling. This
+ * function is called after PCI bus has been reset to restart the PCI card
+ * from scratch, as if from a cold-boot. When this routine is invoked, it
+ * dispatches the action to the proper SLI-3 or SLI-4 device reset handling
+ * routine, which will perform the proper device reset.
+ *
+ * Return codes
+ *     PCI_ERS_RESULT_RECOVERED - the device has been recovered
+ *     PCI_ERS_RESULT_DISCONNECT - device could not be recovered
+ **/
+static pci_ers_result_t
+lpfc_io_slot_reset(struct pci_dev *pdev)
+{
+       struct Scsi_Host *shost = pci_get_drvdata(pdev);
+       struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
+       pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
 
-       /* Configure and enable interrupt */
-       intr_mode = lpfc_enable_intr(phba, phba->intr_mode);
-       if (intr_mode == LPFC_INTR_ERROR) {
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               rc = lpfc_io_slot_reset_s3(pdev);
+               break;
+       case LPFC_PCI_DEV_OC:
+               rc = lpfc_io_slot_reset_s4(pdev);
+               break;
+       default:
                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-                               "0427 Cannot re-enable interrupt after "
-                               "slot reset.\n");
-               return PCI_ERS_RESULT_DISCONNECT;
-       } else
-               phba->intr_mode = intr_mode;
-
-       /* Take device offline; this will perform cleanup */
-       lpfc_offline(phba);
-       lpfc_sli_brdrestart(phba);
-
-       /* Log the current active interrupt mode */
-       lpfc_log_intr_mode(phba, phba->intr_mode);
-
-       return PCI_ERS_RESULT_RECOVERED;
+                               "1428 Invalid PCI device group: 0x%x\n",
+                               phba->pci_dev_grp);
+               break;
+       }
+       return rc;
 }
 
 /**
- * lpfc_io_resume - Resume PCI I/O operation
+ * lpfc_io_resume - lpfc method for resuming PCI I/O operation
  * @pdev: pointer to PCI device
  *
- * This routine is registered to the PCI subsystem for error handling. It is
- * called when kernel error recovery tells the lpfc driver that it is ok to
- * resume normal PCI operation after PCI bus error recovery. After this call,
- * traffic can start to flow from this device again.
- */
-static void lpfc_io_resume(struct pci_dev *pdev)
+ * This routine is registered to the PCI subsystem for error handling. It
+ * is called when kernel error recovery tells the lpfc driver that it is
+ * OK to resume normal PCI operation after PCI bus error recovery. When
+ * this routine is invoked, it dispatches the action to the proper SLI-3
+ * or SLI-4 device io_resume routine, which will resume the device operation.
+ **/
+static void
+lpfc_io_resume(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct lpfc_hba *phba = ((struct lpfc_vport *)shost->hostdata)->phba;
 
-       lpfc_online(phba);
+       switch (phba->pci_dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               lpfc_io_resume_s3(pdev);
+               break;
+       case LPFC_PCI_DEV_OC:
+               lpfc_io_resume_s4(pdev);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1429 Invalid PCI device group: 0x%x\n",
+                               phba->pci_dev_grp);
+               break;
+       }
+       return;
 }
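
Each of the PCI error-handling entry points above follows the same
dispatch-by-device-group shape: SLI-3 HBAs (LPFC_PCI_DEV_LP) and SLI-4 HBAs
(LPFC_PCI_DEV_OC) each get a family-specific handler. A minimal sketch of the
pattern, purely for illustration and not part of this patch; the helper name
is hypothetical and "xxxx" stands in for a real lpfc message number (lpfc
itself keeps the switches open-coded per entry point):

typedef int (*lpfc_grp_fn_t)(struct pci_dev *pdev);

/* Hypothetical illustration of the dispatch pattern. */
static int lpfc_dispatch_by_grp(struct lpfc_hba *phba, struct pci_dev *pdev,
				lpfc_grp_fn_t s3_fn, lpfc_grp_fn_t s4_fn)
{
	switch (phba->pci_dev_grp) {
	case LPFC_PCI_DEV_LP:		/* SLI-3 HBA */
		return s3_fn(pdev);
	case LPFC_PCI_DEV_OC:		/* SLI-4 HBA */
		return s4_fn(pdev);
	default:
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"xxxx Invalid PCI device group: 0x%x\n",
				phba->pci_dev_grp);
		return -ENODEV;
	}
}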
 
 static struct pci_device_id lpfc_id_table[] = {
@@ -3469,6 +7939,10 @@ static struct pci_device_id lpfc_id_table[] = {
                PCI_ANY_ID, PCI_ANY_ID, },
        {PCI_VENDOR_ID_EMULEX, PCI_DEVICE_ID_PROTEUS_S,
                PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK,
+               PCI_ANY_ID, PCI_ANY_ID, },
+       {PCI_VENDOR_ID_SERVERENGINE, PCI_DEVICE_ID_TIGERSHARK_S,
+               PCI_ANY_ID, PCI_ANY_ID, },
        { 0 }
 };
 
@@ -3486,7 +7960,7 @@ static struct pci_driver lpfc_driver = {
        .probe          = lpfc_pci_probe_one,
        .remove         = __devexit_p(lpfc_pci_remove_one),
        .suspend        = lpfc_pci_suspend_one,
-       .resume         = lpfc_pci_resume_one,
+       .resume         = lpfc_pci_resume_one,
        .err_handler    = &lpfc_err_handler,
 };
 
index 1aa85709b012d24826652d87e0c5da2121f1dda2..954ba57970a3ab294ef8db13d99ff798b6ca346c 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  *                                                                 *
  * included with this package.                                     *
  *******************************************************************/
 
-#define LOG_ELS                       0x1      /* ELS events */
-#define LOG_DISCOVERY                 0x2      /* Link discovery events */
-#define LOG_MBOX                      0x4      /* Mailbox events */
-#define LOG_INIT                      0x8      /* Initialization events */
-#define LOG_LINK_EVENT                0x10     /* Link events */
-#define LOG_IP                        0x20     /* IP traffic history */
-#define LOG_FCP                       0x40     /* FCP traffic history */
-#define LOG_NODE                      0x80     /* Node table events */
-#define LOG_TEMP                      0x100    /* Temperature sensor events */
-#define LOG_BG                       0x200     /* BlockGuard events */
-#define LOG_MISC                      0x400    /* Miscellaneous events */
-#define LOG_SLI                       0x800    /* SLI events */
-#define LOG_FCP_ERROR                 0x1000   /* log errors, not underruns */
-#define LOG_LIBDFC                    0x2000   /* Libdfc events */
-#define LOG_VPORT                     0x4000   /* NPIV events */
-#define LOG_ALL_MSG                   0xffff   /* LOG all messages */
+#define LOG_ELS                0x00000001      /* ELS events */
+#define LOG_DISCOVERY  0x00000002      /* Link discovery events */
+#define LOG_MBOX       0x00000004      /* Mailbox events */
+#define LOG_INIT       0x00000008      /* Initialization events */
+#define LOG_LINK_EVENT 0x00000010      /* Link events */
+#define LOG_IP         0x00000020      /* IP traffic history */
+#define LOG_FCP                0x00000040      /* FCP traffic history */
+#define LOG_NODE       0x00000080      /* Node table events */
+#define LOG_TEMP       0x00000100      /* Temperature sensor events */
+#define LOG_BG         0x00000200      /* BlockGuard events */
+#define LOG_MISC       0x00000400      /* Miscellaneous events */
+#define LOG_SLI                0x00000800      /* SLI events */
+#define LOG_FCP_ERROR  0x00001000      /* log errors, not underruns */
+#define LOG_LIBDFC     0x00002000      /* Libdfc events */
+#define LOG_VPORT      0x00004000      /* NPIV events */
+#define LOG_SECURITY   0x00008000      /* Security events */
+#define LOG_EVENT      0x00010000      /* CT,TEMP,DUMP, logging */
+#define LOG_ALL_MSG    0xffffffff      /* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
-       do { \
-       { if (((mask) &(vport)->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+       { if (((mask) & (vport)->cfg_log_verbose) || (level[1] <= '3')) \
                dev_printk(level, &((vport)->phba->pcidev)->dev, "%d:(%d):" \
                           fmt, (vport)->phba->brd_no, vport->vpi, ##arg); } \
-       } while (0)
+} while (0)
 
 #define lpfc_printf_log(phba, level, mask, fmt, arg...) \
-       do { \
-       { if (((mask) &(phba)->pport->cfg_log_verbose) || (level[1] <= '3')) \
+do { \
+       { uint32_t log_verbose = (phba)->pport ? \
+                                (phba)->pport->cfg_log_verbose : \
+                                (phba)->cfg_log_verbose; \
+         if (((mask) & log_verbose) || (level[1] <= '3')) \
                dev_printk(level, &((phba)->pcidev)->dev, "%d:" \
-                          fmt, phba->brd_no, ##arg); } \
-       } while (0)
+                          fmt, phba->brd_no, ##arg); \
+       } \
+} while (0)
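
The reworked macros widen the verbose mask to 32 bits and let lpfc_printf_log
fall back to phba->cfg_log_verbose while no physical port exists yet. Note the
level[1] <= '3' test: KERN_ERR is the string "<3>", so messages at severity
KERN_ERR or higher always print regardless of the configured mask. A hedged
usage sketch; the "0000" message numbers and the mbxCommand/mbxStatus
variables are placeholders, not from this patch:

/* Emitted only when LOG_MBOX or LOG_SLI is set in the verbose mask,
 * because KERN_INFO is "<6>" and '6' > '3'. */
lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
		"0000 mbox cmd x%x completed, status x%x\n",
		mbxCommand, mbxStatus);

/* A KERN_ERR message ("<3>") prints unconditionally. */
lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
		 "0000 vport %d failed to register VPI\n", vport->vpi);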
index 134fc7fc2127454ccf98083cdd375d2f39400158..b9b451c090101565d507f34c931009a0c25f15e8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 
 #include <scsi/scsi.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_crtn.h"
 #include "lpfc_compat.h"
 
+/**
+ * lpfc_dump_static_vport - Dump HBA's static vport information.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset for dumping vport info.
+ *
+ * The dump mailbox command provides a method for the device driver to obtain
+ * various types of information from the HBA device.
+ *
+ * This routine prepares the mailbox command for dumping the list of static
+ * vports to be created.
+ **/
+void
+lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
+               uint16_t offset)
+{
+       MAILBOX_t *mb;
+       void *ctx;
+
+       mb = &pmb->u.mb;
+       ctx = pmb->context2;
+
+       /* Setup to dump vport info region */
+       memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+       mb->mbxCommand = MBX_DUMP_MEMORY;
+       mb->un.varDmp.cv = 1;
+       mb->un.varDmp.type = DMP_NV_PARAMS;
+       mb->un.varDmp.entry_index = offset;
+       mb->un.varDmp.region_id = DMP_REGION_VPORT;
+       mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+       mb->un.varDmp.co = 0;
+       mb->un.varDmp.resp_offset = 0;
+       pmb->context2 = ctx;
+       mb->mbxOwner = OWN_HOST;
+
+       return;
+}
+
 /**
  * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
  * @phba: pointer to lpfc hba data structure.
@@ -58,7 +98,7 @@ lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
        MAILBOX_t *mb;
        void *ctx;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        ctx = pmb->context2;
 
        /* Setup to dump VPD region */
@@ -90,7 +130,7 @@ lpfc_dump_wakeup_param(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
        MAILBOX_t *mb;
        void *ctx;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        /* Save context so that we can restore after memset */
        ctx = pmb->context2;
 
@@ -125,7 +165,7 @@ lpfc_read_nv(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
        mb->mbxCommand = MBX_READ_NV;
        mb->mbxOwner = OWN_HOST;
@@ -151,7 +191,7 @@ lpfc_config_async(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb,
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
        mb->mbxCommand = MBX_ASYNCEVT_ENABLE;
        mb->un.varCfgAsyncEvent.ring = ring;
@@ -177,7 +217,7 @@ lpfc_heart_beat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
        mb->mbxCommand = MBX_HEARTBEAT;
        mb->mbxOwner = OWN_HOST;
@@ -211,7 +251,7 @@ lpfc_read_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, struct lpfc_dmabuf *mp)
        struct lpfc_sli *psli;
 
        psli = &phba->sli;
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        INIT_LIST_HEAD(&mp->list);
@@ -248,7 +288,7 @@ lpfc_clear_la(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->un.varClearLA.eventTag = phba->fc_eventTag;
@@ -275,7 +315,7 @@ void
 lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
        struct lpfc_vport  *vport = phba->pport;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        /* NEW_FEATURE
@@ -321,7 +361,7 @@ lpfc_config_link(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 int
 lpfc_config_msi(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        uint32_t attentionConditions[2];
 
        /* Sanity check */
@@ -405,7 +445,7 @@ lpfc_init_link(struct lpfc_hba * phba,
        struct lpfc_sli *psli;
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        psli = &phba->sli;
@@ -492,7 +532,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
        struct lpfc_sli *psli;
 
        psli = &phba->sli;
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->mbxOwner = OWN_HOST;
@@ -515,7 +555,7 @@ lpfc_read_sparam(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, int vpi)
        mb->un.varRdSparm.un.sp64.tus.f.bdeSize = sizeof (struct serv_parm);
        mb->un.varRdSparm.un.sp64.addrHigh = putPaddrHigh(mp->phys);
        mb->un.varRdSparm.un.sp64.addrLow = putPaddrLow(mp->phys);
-       mb->un.varRdSparm.vpi = vpi;
+       mb->un.varRdSparm.vpi = vpi + phba->vpi_base;
 
        /* save address for completion */
        pmb->context1 = mp;
@@ -544,10 +584,12 @@ lpfc_unreg_did(struct lpfc_hba * phba, uint16_t vpi, uint32_t did,
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->un.varUnregDID.did = did;
+       if (vpi != 0xffff)
+               vpi += phba->vpi_base;
        mb->un.varUnregDID.vpi = vpi;
 
        mb->mbxCommand = MBX_UNREG_D_ID;
@@ -573,7 +615,7 @@ lpfc_read_config(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->mbxCommand = MBX_READ_CONFIG;
@@ -598,7 +640,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->mbxCommand = MBX_READ_LNK_STAT;
@@ -607,7 +649,7 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 }
 
 /**
- * lpfc_reg_login - Prepare a mailbox command for registering remote login
+ * lpfc_reg_rpi - Prepare a mailbox command for registering remote login
  * @phba: pointer to lpfc hba data structure.
  * @vpi: virtual N_Port identifier.
  * @did: remote port identifier.
@@ -631,17 +673,23 @@ lpfc_read_lnk_stat(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
  *    1 - DMA memory allocation failed
  **/
 int
-lpfc_reg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
+lpfc_reg_rpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t did,
               uint8_t *param, LPFC_MBOXQ_t *pmb, uint32_t flag)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        uint8_t *sparam;
        struct lpfc_dmabuf *mp;
 
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->un.varRegLogin.rpi = 0;
-       mb->un.varRegLogin.vpi = vpi;
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               mb->un.varRegLogin.rpi = lpfc_sli4_alloc_rpi(phba);
+               if (mb->un.varRegLogin.rpi == LPFC_RPI_ALLOC_ERROR)
+                       return 1;
+       }
+
+       mb->un.varRegLogin.vpi = vpi + phba->vpi_base;
        mb->un.varRegLogin.did = did;
        mb->un.varWords[30] = flag;     /* Set flag to issue action on cmpl */
 
@@ -697,15 +745,16 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
 {
        MAILBOX_t *mb;
 
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
        mb->un.varUnregLogin.rpi = (uint16_t) rpi;
        mb->un.varUnregLogin.rsvd1 = 0;
-       mb->un.varUnregLogin.vpi = vpi;
+       mb->un.varUnregLogin.vpi = vpi + phba->vpi_base;
 
        mb->mbxCommand = MBX_UNREG_LOGIN;
        mb->mbxOwner = OWN_HOST;
+
        return;
 }
 
@@ -725,15 +774,15 @@ lpfc_unreg_login(struct lpfc_hba *phba, uint16_t vpi, uint32_t rpi,
  * This routine prepares the mailbox command for registering a virtual N_Port.
  **/
 void
-lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
-            LPFC_MBOXQ_t *pmb)
+lpfc_reg_vpi(struct lpfc_vport *vport, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
 
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-       mb->un.varRegVpi.vpi = vpi;
-       mb->un.varRegVpi.sid = sid;
+       mb->un.varRegVpi.vpi = vport->vpi + vport->phba->vpi_base;
+       mb->un.varRegVpi.sid = vport->fc_myDID;
+       mb->un.varRegVpi.vfi = vport->vfi + vport->phba->vfi_base;
 
        mb->mbxCommand = MBX_REG_VPI;
        mb->mbxOwner = OWN_HOST;
@@ -760,10 +809,10 @@ lpfc_reg_vpi(struct lpfc_hba *phba, uint16_t vpi, uint32_t sid,
 void
 lpfc_unreg_vpi(struct lpfc_hba *phba, uint16_t vpi, LPFC_MBOXQ_t *pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
 
-       mb->un.varUnregVpi.vpi = vpi;
+       mb->un.varUnregVpi.vpi = vpi + phba->vpi_base;
 
        mb->mbxCommand = MBX_UNREG_VPI;
        mb->mbxOwner = OWN_HOST;
@@ -852,7 +901,7 @@ lpfc_config_pcb_setup(struct lpfc_hba * phba)
 void
 lpfc_read_rev(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
        mb->un.varRdRev.cv = 1;
        mb->un.varRdRev.v3req = 1; /* Request SLI3 info */
@@ -945,7 +994,7 @@ lpfc_config_hbq(struct lpfc_hba *phba, uint32_t id,
                uint32_t hbq_entry_index, LPFC_MBOXQ_t *pmb)
 {
        int i;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct config_hbq_var *hbqmb = &mb->un.varCfgHbq;
 
        memset(pmb, 0, sizeof (LPFC_MBOXQ_t));
@@ -1020,7 +1069,7 @@ void
 lpfc_config_ring(struct lpfc_hba * phba, int ring, LPFC_MBOXQ_t * pmb)
 {
        int i;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        struct lpfc_sli *psli;
        struct lpfc_sli_ring *pring;
 
@@ -1075,7 +1124,7 @@ void
 lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        MAILBOX_t __iomem *mb_slim = (MAILBOX_t __iomem *) phba->MBslimaddr;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        dma_addr_t pdma_addr;
        uint32_t bar_low, bar_high;
        size_t offset;
@@ -1099,21 +1148,22 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 
        /* If HBA supports SLI=3 ask for it */
 
-       if (phba->sli_rev == 3 && phba->vpd.sli3Feat.cerbm) {
+       if (phba->sli_rev == LPFC_SLI_REV3 && phba->vpd.sli3Feat.cerbm) {
                if (phba->cfg_enable_bg)
                        mb->un.varCfgPort.cbg = 1; /* configure BlockGuard */
+               mb->un.varCfgPort.cdss = 1; /* Configure Security */
                mb->un.varCfgPort.cerbm = 1; /* Request HBQs */
                mb->un.varCfgPort.ccrp = 1; /* Command Ring Polling */
                mb->un.varCfgPort.cinb = 1; /* Interrupt Notification Block */
                mb->un.varCfgPort.max_hbq = lpfc_sli_hbq_count();
                if (phba->max_vpi && phba->cfg_enable_npiv &&
                    phba->vpd.sli3Feat.cmv) {
-                       mb->un.varCfgPort.max_vpi = phba->max_vpi;
+                       mb->un.varCfgPort.max_vpi = LPFC_MAX_VPI;
                        mb->un.varCfgPort.cmv = 1;
                } else
                        mb->un.varCfgPort.max_vpi = phba->max_vpi = 0;
        } else
-               phba->sli_rev = 2;
+               phba->sli_rev = LPFC_SLI_REV2;
        mb->un.varCfgPort.sli_mode = phba->sli_rev;
 
        /* Now setup pcb */
@@ -1245,7 +1295,7 @@ lpfc_config_port(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 void
 lpfc_kill_board(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
 {
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
 
        memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
        mb->mbxCommand = MBX_KILL_BOARD;
@@ -1304,29 +1354,98 @@ lpfc_mbox_get(struct lpfc_hba * phba)
        return mbq;
 }
 
+/**
+ * __lpfc_mbox_cmpl_put - Put mailbox cmd into mailbox cmd complete list
+ * @phba: pointer to lpfc hba data structure.
+ * @mbq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine puts the completed mailbox command into the mailbox command
+ * complete list. This is the unlocked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
+ **/
+void
+__lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
+{
+       list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+}
+
 /**
  * lpfc_mbox_cmpl_put - Put mailbox command into mailbox command complete list
  * @phba: pointer to lpfc hba data structure.
  * @mbq: pointer to the driver internal queue element for mailbox command.
  *
 * This routine puts the completed mailbox command into the mailbox command
- * complete list. This routine is called from driver interrupt handler
- * context.The mailbox complete list is used by the driver worker thread
- * to process mailbox complete callback functions outside the driver interrupt
- * handler.
+ * complete list. This is the locked version of the routine. The mailbox
+ * complete list is used by the driver worker thread to process mailbox
+ * complete callback functions outside the driver interrupt handler.
  **/
 void
-lpfc_mbox_cmpl_put(struct lpfc_hba * phba, LPFC_MBOXQ_t * mbq)
+lpfc_mbox_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
 {
        unsigned long iflag;
 
        /* This function expects to be called from interrupt context */
        spin_lock_irqsave(&phba->hbalock, iflag);
-       list_add_tail(&mbq->list, &phba->sli.mboxq_cmpl);
+       __lpfc_mbox_cmpl_put(phba, mbq);
        spin_unlock_irqrestore(&phba->hbalock, iflag);
        return;
 }
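
The split follows the usual kernel locked/unlocked naming convention: the
double-underscore variant assumes hbalock is already held. A sketch of a
caller that batches other hbalock work with the list add; the function name
is hypothetical, not from this patch:

static void example_cmpl_put(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbq)
{
	unsigned long iflag;

	spin_lock_irqsave(&phba->hbalock, iflag);
	/* ... other work that must happen under hbalock ... */
	__lpfc_mbox_cmpl_put(phba, mbq);	/* lock already held */
	spin_unlock_irqrestore(&phba->hbalock, iflag);
}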
 
+/**
+ * lpfc_mbox_cmd_check - Check the validity of a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine checks whether a mailbox command is valid to be issued. The
+ * check is performed by the mailbox issue API when a client is to issue a
+ * mailbox command to the mailbox transport.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_cmd_check(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       /* Mailbox commands that have a completion handler must also have a
+        * vport specified.
+        */
+       if (mboxq->mbox_cmpl && mboxq->mbox_cmpl != lpfc_sli_def_mbox_cmpl &&
+           mboxq->mbox_cmpl != lpfc_sli_wake_mbox_wait) {
+               if (!mboxq->vport) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_VPORT,
+                                       "1814 Mbox x%x failed, no vport\n",
+                                       mboxq->u.mb.mbxCommand);
+                       dump_stack();
+                       return -ENODEV;
+               }
+       }
+       return 0;
+}
+
+/**
+ * lpfc_mbox_dev_check - Check the device state for issuing a mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine checks whether the HBA device is ready for posting a
+ * mailbox command. It is used by the mailbox transport API at the time it
+ * is to post a mailbox command to the device.
+ *
+ * Return 0 - pass the check, -ENODEV - fail the check
+ **/
+int
+lpfc_mbox_dev_check(struct lpfc_hba *phba)
+{
+       /* If the PCI channel is in offline state, do not issue mbox */
+       if (unlikely(pci_channel_offline(phba->pcidev)))
+               return -ENODEV;
+
+       /* If the HBA is in error state, do not issue mbox */
+       if (phba->link_state == LPFC_HBA_ERROR)
+               return -ENODEV;
+
+       return 0;
+}
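
Taken together, the two routines gate a mailbox post: one validates the
command itself, the other the device state. A hedged sketch of how an issue
path might chain them; the wrapper is hypothetical, the real gating lives in
the mailbox issue routines:

static int example_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
{
	int rc;

	rc = lpfc_mbox_dev_check(phba);		/* device ready? */
	if (rc)
		return rc;
	rc = lpfc_mbox_cmd_check(phba, mboxq);	/* command well-formed? */
	if (rc)
		return rc;
	/* ... hand the command to the mailbox transport here ... */
	return 0;
}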
+
 /**
  * lpfc_mbox_tmo_val - Retrieve mailbox command timeout value
  * @phba: pointer to lpfc hba data structure.
@@ -1350,6 +1469,475 @@ lpfc_mbox_tmo_val(struct lpfc_hba *phba, int cmd)
        case MBX_WRITE_WWN:     /* 0x98 */
        case MBX_LOAD_EXP_ROM:  /* 0x9C */
                return LPFC_MBOX_TMO_FLASH_CMD;
+       case MBX_SLI4_CONFIG:   /* 0x9b */
+               return LPFC_MBOX_SLI4_CONFIG_TMO;
        }
        return LPFC_MBOX_TMO;
 }
+
+/**
+ * lpfc_sli4_mbx_sge_set - Set a sge entry in non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @phyaddr: physical address for the sge
+ * @length: Length of the sge.
+ *
+ * This routine sets up an entry in the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_set(struct lpfcMboxq *mbox, uint32_t sgentry,
+                     dma_addr_t phyaddr, uint32_t length)
+{
+       struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+       nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+                               &mbox->u.mqe.un.nembed_cmd;
+       nembed_sge->sge[sgentry].pa_lo = putPaddrLow(phyaddr);
+       nembed_sge->sge[sgentry].pa_hi = putPaddrHigh(phyaddr);
+       nembed_sge->sge[sgentry].length = length;
+}
+
+/**
+ * lpfc_sli4_mbx_sge_get - Get a sge entry from non-embedded mailbox command
+ * @mbox: pointer to lpfc mbox command.
+ * @sgentry: sge entry index.
+ * @sge: pointer to the sge entry structure to be filled in.
+ *
+ * This routine gets an entry from the non-embedded mailbox command at the sge
+ * index location.
+ **/
+void
+lpfc_sli4_mbx_sge_get(struct lpfcMboxq *mbox, uint32_t sgentry,
+                     struct lpfc_mbx_sge *sge)
+{
+       struct lpfc_mbx_nembed_cmd *nembed_sge;
+
+       nembed_sge = (struct lpfc_mbx_nembed_cmd *)
+                               &mbox->u.mqe.un.nembed_cmd;
+       sge->pa_lo = nembed_sge->sge[sgentry].pa_lo;
+       sge->pa_hi = nembed_sge->sge[sgentry].pa_hi;
+       sge->length = nembed_sge->sge[sgentry].length;
+}
+
+/**
+ * lpfc_sli4_mbox_cmd_free - Free a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine frees an SLI4-specific mailbox command that was used for
+ * sending an IOCTL command.
+ **/
+void
+lpfc_sli4_mbox_cmd_free(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+       struct lpfc_mbx_sli4_config *sli4_cfg;
+       struct lpfc_mbx_sge sge;
+       dma_addr_t phyaddr;
+       uint32_t sgecount, sgentry;
+
+       sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+       /* For embedded mbox command, just free the mbox command */
+       if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+               mempool_free(mbox, phba->mbox_mem_pool);
+               return;
+       }
+
+       /* For non-embedded mbox command, we need to free the pages first */
+       sgecount = bf_get(lpfc_mbox_hdr_sge_cnt, &sli4_cfg->header.cfg_mhdr);
+       /* There is nothing we can do if there is no sge address array */
+       if (unlikely(!mbox->sge_array)) {
+               mempool_free(mbox, phba->mbox_mem_pool);
+               return;
+       }
+       /* Each non-embedded DMA buffer is allocated in the length of a page */
+       for (sgentry = 0; sgentry < sgecount; sgentry++) {
+               lpfc_sli4_mbx_sge_get(mbox, sgentry, &sge);
+               phyaddr = getPaddr(sge.pa_hi, sge.pa_lo);
+               dma_free_coherent(&phba->pcidev->dev, PAGE_SIZE,
+                                 mbox->sge_array->addr[sgentry], phyaddr);
+       }
+       /* Free the sge address array memory */
+       kfree(mbox->sge_array);
+       /* Finally, free the mailbox command itself */
+       mempool_free(mbox, phba->mbox_mem_pool);
+}
+
+/**
+ * lpfc_sli4_config - Initialize the  SLI4 Config Mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ * @subsystem: The sli4 config sub mailbox subsystem.
+ * @opcode: The sli4 config sub mailbox command opcode.
+ * @length: Length of the sli4 config mailbox command.
+ *
+ * This routine sets up the header fields of an SLI4-specific mailbox command
+ * for sending an IOCTL command.
+ *
+ * Return: the actual length of the mbox command allocated (mostly useful
+ *         for non-embedded mailbox commands).
+ **/
+int
+lpfc_sli4_config(struct lpfc_hba *phba, struct lpfcMboxq *mbox,
+                uint8_t subsystem, uint8_t opcode, uint32_t length, bool emb)
+{
+       struct lpfc_mbx_sli4_config *sli4_config;
+       union lpfc_sli4_cfg_shdr *cfg_shdr = NULL;
+       uint32_t alloc_len;
+       uint32_t resid_len;
+       uint32_t pagen, pcount;
+       void *viraddr;
+       dma_addr_t phyaddr;
+
+       /* Set up SLI4 mailbox command header fields */
+       memset(mbox, 0, sizeof(*mbox));
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_SLI4_CONFIG);
+
+       /* Set up SLI4 ioctl command header fields */
+       sli4_config = &mbox->u.mqe.un.sli4_config;
+
+       /* Setup for the embedded mbox command */
+       if (emb) {
+               /* Set up main header fields */
+               bf_set(lpfc_mbox_hdr_emb, &sli4_config->header.cfg_mhdr, 1);
+               sli4_config->header.cfg_mhdr.payload_length =
+                                       LPFC_MBX_CMD_HDR_LENGTH + length;
+               /* Set up sub-header fields following main header */
+               bf_set(lpfc_mbox_hdr_opcode,
+                       &sli4_config->header.cfg_shdr.request, opcode);
+               bf_set(lpfc_mbox_hdr_subsystem,
+                       &sli4_config->header.cfg_shdr.request, subsystem);
+               sli4_config->header.cfg_shdr.request.request_length = length;
+               return length;
+       }
+
+       /* Setup for the non-embedded mbox command */
+       pcount = (PAGE_ALIGN(length))/PAGE_SIZE;
+       pcount = (pcount > LPFC_SLI4_MBX_SGE_MAX_PAGES) ?
+                               LPFC_SLI4_MBX_SGE_MAX_PAGES : pcount;
+       /* Allocate record for keeping SGE virtual addresses */
+       mbox->sge_array = kmalloc(sizeof(struct lpfc_mbx_nembed_sge_virt),
+                                 GFP_KERNEL);
+       if (!mbox->sge_array)
+               return 0;
+
+       for (pagen = 0, alloc_len = 0; pagen < pcount; pagen++) {
+               /* The DMA memory is always allocated in the length of a
+                * page even though the last SGE might not fill up to a
+                * page; PAGE_SIZE is then assumed as the buffer length
+                * when that DMA memory is freed later.
+                */
+               viraddr = dma_alloc_coherent(&phba->pcidev->dev, PAGE_SIZE,
+                                            &phyaddr, GFP_KERNEL);
+               /* If the allocation fails, proceed with whatever we have */
+               if (!viraddr)
+                       break;
+               mbox->sge_array->addr[pagen] = viraddr;
+               /* Keep the first page for later sub-header construction */
+               if (pagen == 0)
+                       cfg_shdr = (union lpfc_sli4_cfg_shdr *)viraddr;
+               resid_len = length - alloc_len;
+               if (resid_len > PAGE_SIZE) {
+                       lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+                                             PAGE_SIZE);
+                       alloc_len += PAGE_SIZE;
+               } else {
+                       lpfc_sli4_mbx_sge_set(mbox, pagen, phyaddr,
+                                             resid_len);
+                       alloc_len = length;
+               }
+       }
+
+       /* Set up main header fields in mailbox command */
+       sli4_config->header.cfg_mhdr.payload_length = alloc_len;
+       bf_set(lpfc_mbox_hdr_sge_cnt, &sli4_config->header.cfg_mhdr, pagen);
+
+       /* Set up sub-header fields into the first page */
+       if (pagen > 0) {
+               bf_set(lpfc_mbox_hdr_opcode, &cfg_shdr->request, opcode);
+               bf_set(lpfc_mbox_hdr_subsystem, &cfg_shdr->request, subsystem);
+               cfg_shdr->request.request_length =
+                               alloc_len - sizeof(union  lpfc_sli4_cfg_shdr);
+       }
+       /* The sub-header is in DMA memory, which needs endian conversion */
+       lpfc_sli_pcimem_bcopy(cfg_shdr, cfg_shdr,
+                             sizeof(union  lpfc_sli4_cfg_shdr));
+
+       return alloc_len;
+}
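
A caller is expected to compare the returned length with the length it
requested, since a non-embedded setup may come back short if a page
allocation fails partway. A minimal sketch of an embedded request; the
EXAMPLE_* subsystem/opcode values and the function name are hypothetical,
not from this patch:

#define EXAMPLE_SUBSYSTEM	0x1	/* hypothetical subsystem id */
#define EXAMPLE_OPCODE		0x20	/* hypothetical opcode */

static int example_sli4_config(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	int reqlen = 64;		/* sub-command payload in bytes */
	int alloclen;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	alloclen = lpfc_sli4_config(phba, mbox, EXAMPLE_SUBSYSTEM,
				    EXAMPLE_OPCODE, reqlen, true);
	if (alloclen < reqlen) {
		lpfc_sli4_mbox_cmd_free(phba, mbox);
		return -ENOMEM;
	}
	/* ... fill the payload and issue the mailbox command ... */
	lpfc_sli4_mbox_cmd_free(phba, mbox);
	return 0;
}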
+
+/**
+ * lpfc_sli4_mbox_opcode_get - Get the opcode from a sli4 mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command.
+ *
+ * This routine gets the opcode from an SLI4-specific mailbox command used
+ * for sending an IOCTL command. If the mailbox command is not MBX_SLI4_CONFIG
+ * (0x9B) or if the IOCTL sub-header is not present, opcode 0x0 shall be
+ * returned.
+ **/
+uint8_t
+lpfc_sli4_mbox_opcode_get(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+       struct lpfc_mbx_sli4_config *sli4_cfg;
+       union lpfc_sli4_cfg_shdr *cfg_shdr;
+
+       if (mbox->u.mb.mbxCommand != MBX_SLI4_CONFIG)
+               return 0;
+       sli4_cfg = &mbox->u.mqe.un.sli4_config;
+
+       /* For embedded mbox command, get opcode from embedded sub-header*/
+       if (bf_get(lpfc_mbox_hdr_emb, &sli4_cfg->header.cfg_mhdr)) {
+               cfg_shdr = &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+               return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+       }
+
+       /* For non-embedded mbox command, get opcode from first dma page */
+       if (unlikely(!mbox->sge_array))
+               return 0;
+       cfg_shdr = (union lpfc_sli4_cfg_shdr *)mbox->sge_array->addr[0];
+       return bf_get(lpfc_mbox_hdr_opcode, &cfg_shdr->request);
+}
+
+/**
+ * lpfc_request_features - Configure the SLI4 REQUEST_FEATURES mailbox command
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to lpfc mbox command.
+ *
+ * This routine sets up the mailbox for an SLI4 REQUEST_FEATURES
+ * mailbox command.
+ **/
+void
+lpfc_request_features(struct lpfc_hba *phba, struct lpfcMboxq *mboxq)
+{
+       /* Set up SLI4 mailbox command header fields */
+       memset(mboxq, 0, sizeof(LPFC_MBOXQ_t));
+       bf_set(lpfc_mqe_command, &mboxq->u.mqe, MBX_SLI4_REQ_FTRS);
+
+       /* Set up host requested features. */
+       bf_set(lpfc_mbx_rq_ftr_rq_fcpi, &mboxq->u.mqe.un.req_ftrs, 1);
+
+       /* Virtual fabrics and FIPs are not supported yet. */
+       bf_set(lpfc_mbx_rq_ftr_rq_ifip, &mboxq->u.mqe.un.req_ftrs, 0);
+
+       /* Enable DIF (block guard) only if configured to do so. */
+       if (phba->cfg_enable_bg)
+               bf_set(lpfc_mbx_rq_ftr_rq_dif, &mboxq->u.mqe.un.req_ftrs, 1);
+
+       /* Enable NPIV only if configured to do so. */
+       if (phba->max_vpi && phba->cfg_enable_npiv)
+               bf_set(lpfc_mbx_rq_ftr_rq_npiv, &mboxq->u.mqe.un.req_ftrs, 1);
+
+       return;
+}
+
+/**
+ * lpfc_init_vfi - Initialize the INIT_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: Vport associated with the VF.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport. INIT_VFI configures virtual fabrics identified by VFI
+ * in the context of an FCF. The driver issues this command to set up a VFI
+ * before issuing a FLOGI to log in to the VSAN. The driver should also issue a
+ * REG_VFI after a successful VSAN login.
+ **/
+void
+lpfc_init_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport)
+{
+       struct lpfc_mbx_init_vfi *init_vfi;
+
+       memset(mbox, 0, sizeof(*mbox));
+       init_vfi = &mbox->u.mqe.un.init_vfi;
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VFI);
+       bf_set(lpfc_init_vfi_vr, init_vfi, 1);
+       bf_set(lpfc_init_vfi_vt, init_vfi, 1);
+       bf_set(lpfc_init_vfi_vfi, init_vfi, vport->vfi + vport->phba->vfi_base);
+       bf_set(lpfc_init_vfi_fcfi, init_vfi, vport->phba->fcf.fcfi);
+}
+
+/**
+ * lpfc_reg_vfi - Initialize the REG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vport: vport associated with the VF.
+ * @phys: BDE DMA bus address used to send the service parameters to the HBA.
+ *
+ * This routine initializes @mbox to all zeros and then fills in the mailbox
+ * fields from @vport, using the buffer at DMA bus address @phys to send the
+ * vport's fc service parameters to the HBA for this VFI. REG_VFI configures
+ * virtual fabrics identified by VFI in the context of an FCF.
+ **/
+void
+lpfc_reg_vfi(struct lpfcMboxq *mbox, struct lpfc_vport *vport, dma_addr_t phys)
+{
+       struct lpfc_mbx_reg_vfi *reg_vfi;
+
+       memset(mbox, 0, sizeof(*mbox));
+       reg_vfi = &mbox->u.mqe.un.reg_vfi;
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_VFI);
+       bf_set(lpfc_reg_vfi_vp, reg_vfi, 1);
+       bf_set(lpfc_reg_vfi_vfi, reg_vfi, vport->vfi + vport->phba->vfi_base);
+       bf_set(lpfc_reg_vfi_fcfi, reg_vfi, vport->phba->fcf.fcfi);
+       bf_set(lpfc_reg_vfi_vpi, reg_vfi, vport->vpi + vport->phba->vpi_base);
+       reg_vfi->bde.addrHigh = putPaddrHigh(phys);
+       reg_vfi->bde.addrLow = putPaddrLow(phys);
+       reg_vfi->bde.tus.f.bdeSize = sizeof(vport->fc_sparam);
+       reg_vfi->bde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+       bf_set(lpfc_reg_vfi_nport_id, reg_vfi, vport->fc_myDID);
+}
+
+/**
+ * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vpi: VPI to be initialized.
+ *
+ * The INIT_VPI mailbox command supports virtual N_Ports. The driver uses the
+ * command to activate a virtual N_Port. The HBA assigns a MAC address to use
+ * with the virtual N_Port. The SLI Host issues this command before issuing a
+ * FDISC to connect to the Fabric. The SLI Host should issue a REG_VPI after a
+ * successful virtual N_Port login.
+ **/
+void
+lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
+{
+       memset(mbox, 0, sizeof(*mbox));
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
+       bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
+}
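
All of these SLI-4 mailbox builders lean on the bf_set()/bf_get() bitfield
accessors from lpfc_hw4.h. Their assumed shape is sketched below with
_example suffixes rather than restating the real macros: each field name
expands to _SHIFT/_MASK/_WORD companion constants, and setting a field
read-modify-writes a single 32-bit word of the command structure:

#define bf_set_example(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
	 ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define bf_get_example(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)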
+
+/**
+ * lpfc_unreg_vfi - Initialize the UNREG_VFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @vfi: VFI to be unregistered.
+ *
+ * The UNREG_VFI mailbox command causes the SLI Host to put a virtual fabric
+ * (logical NPort) into the inactive state. The SLI Host must have logged out
+ * and unregistered all remote N_Ports to abort any activity on the virtual
+ * fabric. The SLI Port posts the mailbox response after marking the virtual
+ * fabric inactive.
+ **/
+void
+lpfc_unreg_vfi(struct lpfcMboxq *mbox, uint16_t vfi)
+{
+       memset(mbox, 0, sizeof(*mbox));
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_VFI);
+       bf_set(lpfc_unreg_vfi_vfi, &mbox->u.mqe.un.unreg_vfi, vfi);
+}
+
+/**
+ * lpfc_dump_fcoe_param - Dump config region 23 to get FCoE parameters.
+ * @phba: pointer to lpfc hba data structure.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * This function creates an SLI4 dump mailbox command to dump FCoE
+ * parameters stored in region 23.
+ **/
+int
+lpfc_dump_fcoe_param(struct lpfc_hba *phba,
+               struct lpfcMboxq *mbox)
+{
+       struct lpfc_dmabuf *mp = NULL;
+       MAILBOX_t *mb;
+
+       memset(mbox, 0, sizeof(*mbox));
+       mb = &mbox->u.mb;
+
+       mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (mp)
+               mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+       if (!mp || !mp->virt) {
+               kfree(mp);
+               /* dump_fcoe_param failed to allocate memory */
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
+                       "2569 lpfc_dump_fcoe_param: memory"
+                       " allocation failed \n");
+               return 1;
+       }
+
+       memset(mp->virt, 0, LPFC_BPL_SIZE);
+       INIT_LIST_HEAD(&mp->list);
+
+       /* save address for completion */
+       mbox->context1 = (uint8_t *) mp;
+
+       mb->mbxCommand = MBX_DUMP_MEMORY;
+       mb->un.varDmp.type = DMP_NV_PARAMS;
+       mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
+       mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
+       mb->un.varWords[3] = putPaddrLow(mp->phys);
+       mb->un.varWords[4] = putPaddrHigh(mp->phys);
+       return 0;
+}
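
The DMA buffer parked in context1 belongs to the caller, so both the buffer
and the mailbox must be unwound on every path. A hedged sketch of a polled
caller; the function name is hypothetical and it assumes issuing via
lpfc_sli_issue_mbox in MBX_POLL mode:

static int example_read_fcoe_param(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *mbox;
	struct lpfc_dmabuf *mp;
	int rc;

	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!mbox)
		return -ENOMEM;
	if (lpfc_dump_fcoe_param(phba, mbox)) {
		mempool_free(mbox, phba->mbox_mem_pool);
		return -ENOMEM;
	}
	rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
	mp = (struct lpfc_dmabuf *) mbox->context1;
	if (rc == MBX_SUCCESS) {
		/* ... parse the region 23 FCoE parameters at mp->virt ... */
	}
	lpfc_mbuf_free(phba, mp->virt, mp->phys);
	kfree(mp);
	mempool_free(mbox, phba->mbox_mem_pool);
	return (rc == MBX_SUCCESS) ? 0 : -EIO;
}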
+
+/**
+ * lpfc_reg_fcfi - Initialize the REG_FCFI mailbox command
+ * @phba: pointer to the hba structure containing the FCF index and RQ ID.
+ * @mbox: pointer to lpfc mbox command to initialize.
+ *
+ * The REG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs). The
+ * SLI Host uses the command to activate an FCF after it has acquired FCF
+ * information via a READ_FCF mailbox command. This mailbox command is also
+ * used to indicate where received unsolicited frames from this FCF will be
+ * sent. By default this routine will set up the FCF to forward all unsolicited
+ * frames to the RQ ID passed in @phba. This can be overridden by the caller for
+ * more complicated setups.
+ **/
+void
+lpfc_reg_fcfi(struct lpfc_hba *phba, struct lpfcMboxq *mbox)
+{
+       struct lpfc_mbx_reg_fcfi *reg_fcfi;
+
+       memset(mbox, 0, sizeof(*mbox));
+       reg_fcfi = &mbox->u.mqe.un.reg_fcfi;
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_REG_FCFI);
+       bf_set(lpfc_reg_fcfi_rq_id0, reg_fcfi, phba->sli4_hba.hdr_rq->queue_id);
+       bf_set(lpfc_reg_fcfi_rq_id1, reg_fcfi, REG_FCF_INVALID_QID);
+       bf_set(lpfc_reg_fcfi_rq_id2, reg_fcfi, REG_FCF_INVALID_QID);
+       bf_set(lpfc_reg_fcfi_rq_id3, reg_fcfi, REG_FCF_INVALID_QID);
+       bf_set(lpfc_reg_fcfi_info_index, reg_fcfi, phba->fcf.fcf_indx);
+       /* reg_fcf addr mode is bit wise inverted value of fcf addr_mode */
+       bf_set(lpfc_reg_fcfi_mam, reg_fcfi,
+               (~phba->fcf.addr_mode) & 0x3);
+       if (phba->fcf.fcf_flag & FCF_VALID_VLAN) {
+               bf_set(lpfc_reg_fcfi_vv, reg_fcfi, 1);
+               bf_set(lpfc_reg_fcfi_vlan_tag, reg_fcfi, phba->fcf.vlan_id);
+       }
+}
+
+/**
+ * lpfc_unreg_fcfi - Initialize the UNREG_FCFI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @fcfi: FCFI to be unregistered.
+ *
+ * The UNREG_FCFI mailbox command supports Fibre Channel Forwarders (FCFs).
+ * The SLI Host uses the command to inactivate an FCFI.
+ **/
+void
+lpfc_unreg_fcfi(struct lpfcMboxq *mbox, uint16_t fcfi)
+{
+       memset(mbox, 0, sizeof(*mbox));
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_UNREG_FCFI);
+       bf_set(lpfc_unreg_fcfi, &mbox->u.mqe.un.unreg_fcfi, fcfi);
+}
+
+/**
+ * lpfc_resume_rpi - Initialize the RESUME_RPI mailbox command
+ * @mbox: pointer to lpfc mbox command to initialize.
+ * @ndlp: The nodelist structure that describes the RPI to resume.
+ *
+ * The RESUME_RPI mailbox command is used to restart I/O to an RPI after a
+ * link event.
+ **/
+void
+lpfc_resume_rpi(struct lpfcMboxq *mbox, struct lpfc_nodelist *ndlp)
+{
+       struct lpfc_mbx_resume_rpi *resume_rpi;
+
+       memset(mbox, 0, sizeof(*mbox));
+       resume_rpi = &mbox->u.mqe.un.resume_rpi;
+       bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
+       bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
+       bf_set(lpfc_resume_rpi_vpi, resume_rpi,
+              ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
+       bf_set(lpfc_resume_rpi_vfi, resume_rpi,
+              ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
+}
index 35a976733398827968083062e2377543d0c1c8a3..e198c917c13ecc55d91839d8ca64a8051ee7f097 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 
 #include <scsi/scsi.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -45,7 +47,7 @@
  * @phba: HBA to allocate pools for
  *
  * Description: Creates and allocates PCI pools lpfc_scsi_dma_buf_pool,
- * lpfc_mbuf_pool, lpfc_hbq_pool.  Creates and allocates kmalloc-backed mempools
+ * lpfc_mbuf_pool, lpfc_hrb_pool.  Creates and allocates kmalloc-backed mempools
  * for LPFC_MBOXQ_t and lpfc_nodelist.  Also allocates the VPI bitmask.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.  If any
  *   -ENOMEM on failure (if any memory allocations fail)
  **/
 int
-lpfc_mem_alloc(struct lpfc_hba * phba)
+lpfc_mem_alloc(struct lpfc_hba *phba, int align)
 {
        struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
        int longs;
        int i;
 
-       phba->lpfc_scsi_dma_buf_pool = pci_pool_create("lpfc_scsi_dma_buf_pool",
-                               phba->pcidev, phba->cfg_sg_dma_buf_size, 8, 0);
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               phba->lpfc_scsi_dma_buf_pool =
+                       pci_pool_create("lpfc_scsi_dma_buf_pool",
+                               phba->pcidev,
+                               phba->cfg_sg_dma_buf_size,
+                               phba->cfg_sg_dma_buf_size,
+                               0);
+       else
+               phba->lpfc_scsi_dma_buf_pool =
+                       pci_pool_create("lpfc_scsi_dma_buf_pool",
+                               phba->pcidev, phba->cfg_sg_dma_buf_size,
+                               align, 0);
        if (!phba->lpfc_scsi_dma_buf_pool)
                goto fail;
 
        phba->lpfc_mbuf_pool = pci_pool_create("lpfc_mbuf_pool", phba->pcidev,
-                                                       LPFC_BPL_SIZE, 8,0);
+                                                       LPFC_BPL_SIZE,
+                                                       align, 0);
        if (!phba->lpfc_mbuf_pool)
                goto fail_free_dma_buf_pool;
 
@@ -97,23 +110,31 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
                                                sizeof(struct lpfc_nodelist));
        if (!phba->nlp_mem_pool)
                goto fail_free_mbox_pool;
-
-       phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",phba->pcidev,
-                                             LPFC_BPL_SIZE, 8, 0);
-       if (!phba->lpfc_hbq_pool)
+       phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+                                             phba->pcidev,
+                                             LPFC_HDR_BUF_SIZE, align, 0);
+       if (!phba->lpfc_hrb_pool)
                goto fail_free_nlp_mem_pool;
+       phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+                                             phba->pcidev,
+                                             LPFC_DATA_BUF_SIZE, align, 0);
+       if (!phba->lpfc_drb_pool)
+               goto fail_free_hbq_pool;
 
        /* vpi zero is reserved for the physical port so add 1 to max */
        longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
        phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
        if (!phba->vpi_bmask)
-               goto fail_free_hbq_pool;
+               goto fail_free_dbq_pool;
 
        return 0;
 
+ fail_free_dbq_pool:
+       pci_pool_destroy(phba->lpfc_drb_pool);
+       phba->lpfc_drb_pool = NULL;
  fail_free_hbq_pool:
-       lpfc_sli_hbqbuf_free_all(phba);
-       pci_pool_destroy(phba->lpfc_hbq_pool);
+       pci_pool_destroy(phba->lpfc_hrb_pool);
+       phba->lpfc_hrb_pool = NULL;
  fail_free_nlp_mem_pool:
        mempool_destroy(phba->nlp_mem_pool);
        phba->nlp_mem_pool = NULL;
@@ -136,27 +157,73 @@ lpfc_mem_alloc(struct lpfc_hba * phba)
 }
 
 /**
- * lpfc_mem_free - Frees all PCI and memory allocated by lpfc_mem_alloc
+ * lpfc_mem_free - Frees memory allocated by lpfc_mem_alloc
  * @phba: HBA to free memory for
  *
- * Description: Frees PCI pools lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool,
- * lpfc_hbq_pool.  Frees kmalloc-backed mempools for LPFC_MBOXQ_t and
- * lpfc_nodelist.  Also frees the VPI bitmask
+ * Description: Frees the memory allocated by the lpfc_mem_alloc routine. This
+ * routine is the counterpart of lpfc_mem_alloc.
  *
  * Returns: None
  **/
 void
-lpfc_mem_free(struct lpfc_hba * phba)
+lpfc_mem_free(struct lpfc_hba *phba)
 {
-       struct lpfc_sli *psli = &phba->sli;
-       struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
-       LPFC_MBOXQ_t *mbox, *next_mbox;
-       struct lpfc_dmabuf   *mp;
        int i;
+       struct lpfc_dma_pool *pool = &phba->lpfc_mbuf_safety_pool;
 
+       /* Free VPI bitmask memory */
        kfree(phba->vpi_bmask);
+
+       /* Free HBQ pools */
        lpfc_sli_hbqbuf_free_all(phba);
+       pci_pool_destroy(phba->lpfc_drb_pool);
+       phba->lpfc_drb_pool = NULL;
+       pci_pool_destroy(phba->lpfc_hrb_pool);
+       phba->lpfc_hrb_pool = NULL;
+
+       /* Free NLP memory pool */
+       mempool_destroy(phba->nlp_mem_pool);
+       phba->nlp_mem_pool = NULL;
+
+       /* Free mbox memory pool */
+       mempool_destroy(phba->mbox_mem_pool);
+       phba->mbox_mem_pool = NULL;
+
+       /* Free MBUF memory pool */
+       for (i = 0; i < pool->current_count; i++)
+               pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
+                             pool->elements[i].phys);
+       kfree(pool->elements);
+
+       pci_pool_destroy(phba->lpfc_mbuf_pool);
+       phba->lpfc_mbuf_pool = NULL;
 
+       /* Free DMA buffer memory pool */
+       pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
+       phba->lpfc_scsi_dma_buf_pool = NULL;
+
+       return;
+}
+
+/**
+ * lpfc_mem_free_all - Frees all PCI and driver memory
+ * @phba: HBA to free memory for
+ *
+ * Description: Frees memory from the PCI and driver memory pools:
+ * lpfc_scsi_dma_buf_pool, lpfc_mbuf_pool, lpfc_hrb_pool and lpfc_drb_pool.
+ * Frees the kmalloc-backed mempools for LPFC_MBOXQ_t and lpfc_nodelist. Also
+ * frees the VPI bitmask.
+ *
+ * Returns: None
+ **/
+void
+lpfc_mem_free_all(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       LPFC_MBOXQ_t *mbox, *next_mbox;
+       struct lpfc_dmabuf   *mp;
+
+       /* Free memory used in mailbox queue back to mailbox memory pool */
        list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq, list) {
                mp = (struct lpfc_dmabuf *) (mbox->context1);
                if (mp) {
@@ -166,6 +233,7 @@ lpfc_mem_free(struct lpfc_hba * phba)
                list_del(&mbox->list);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
+       /* Free memory used in mailbox cmpl list back to mailbox memory pool */
        list_for_each_entry_safe(mbox, next_mbox, &psli->mboxq_cmpl, list) {
                mp = (struct lpfc_dmabuf *) (mbox->context1);
                if (mp) {
@@ -175,8 +243,10 @@ lpfc_mem_free(struct lpfc_hba * phba)
                list_del(&mbox->list);
                mempool_free(mbox, phba->mbox_mem_pool);
        }
-
+       /* Free the active mailbox command back to the mailbox memory pool */
+       spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       spin_unlock_irq(&phba->hbalock);
        if (psli->mbox_active) {
                mbox = psli->mbox_active;
                mp = (struct lpfc_dmabuf *) (mbox->context1);
@@ -188,27 +258,14 @@ lpfc_mem_free(struct lpfc_hba * phba)
                psli->mbox_active = NULL;
        }
 
-       for (i = 0; i < pool->current_count; i++)
-               pci_pool_free(phba->lpfc_mbuf_pool, pool->elements[i].virt,
-                                                pool->elements[i].phys);
-       kfree(pool->elements);
-
-       pci_pool_destroy(phba->lpfc_hbq_pool);
-       mempool_destroy(phba->nlp_mem_pool);
-       mempool_destroy(phba->mbox_mem_pool);
-
-       pci_pool_destroy(phba->lpfc_scsi_dma_buf_pool);
-       pci_pool_destroy(phba->lpfc_mbuf_pool);
-
-       phba->lpfc_hbq_pool = NULL;
-       phba->nlp_mem_pool = NULL;
-       phba->mbox_mem_pool = NULL;
-       phba->lpfc_scsi_dma_buf_pool = NULL;
-       phba->lpfc_mbuf_pool = NULL;
+       /* Free and destroy all the allocated memory pools */
+       lpfc_mem_free(phba);
 
        /* Free the iocb lookup array */
        kfree(psli->iocbq_lookup);
        psli->iocbq_lookup = NULL;
+
+       return;
 }
 
 /**
@@ -305,7 +362,7 @@ lpfc_mbuf_free(struct lpfc_hba * phba, void *virt, dma_addr_t dma)
  * lpfc_els_hbq_alloc - Allocate an HBQ buffer
  * @phba: HBA to allocate HBQ buffer for
  *
- * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hbq_pool PCI
+ * Description: Allocates a DMA-mapped HBQ buffer from the lpfc_hrb_pool PCI
 * pool along with a non-DMA-mapped container for it.
  *
  * Notes: Not interrupt-safe.  Must be called with no locks held.
@@ -323,7 +380,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
        if (!hbqbp)
                return NULL;
 
-       hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
+       hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
                                          &hbqbp->dbuf.phys);
        if (!hbqbp->dbuf.virt) {
                kfree(hbqbp);
@@ -334,7 +391,7 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_mem_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
+ * lpfc_els_hbq_free - Frees an HBQ buffer allocated with lpfc_els_hbq_alloc
  * @phba: HBA buffer was allocated for
  * @hbqbp: HBQ container returned by lpfc_els_hbq_alloc
  *
@@ -348,11 +405,72 @@ lpfc_els_hbq_alloc(struct lpfc_hba *phba)
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-       pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+       pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
        kfree(hbqbp);
        return;
 }
 
+/**
+ * lpfc_sli4_rb_alloc - Allocate an SLI4 Receive buffer
+ * @phba: HBA to allocate a receive buffer for
+ *
+ * Description: Allocates a DMA-mapped receive buffer from the lpfc_hrb_pool PCI
+ * pool along with a non-DMA-mapped container for it.
+ *
+ * Notes: Not interrupt-safe.  Must be called with no locks held.
+ *
+ * Returns:
+ *   pointer to the receive buffer container on success
+ *   NULL on failure
+ **/
+struct hbq_dmabuf *
+lpfc_sli4_rb_alloc(struct lpfc_hba *phba)
+{
+       struct hbq_dmabuf *dma_buf;
+
+       dma_buf = kmalloc(sizeof(struct hbq_dmabuf), GFP_KERNEL);
+       if (!dma_buf)
+               return NULL;
+
+       dma_buf->hbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+                                           &dma_buf->hbuf.phys);
+       if (!dma_buf->hbuf.virt) {
+               kfree(dma_buf);
+               return NULL;
+       }
+       dma_buf->dbuf.virt = pci_pool_alloc(phba->lpfc_drb_pool, GFP_KERNEL,
+                                           &dma_buf->dbuf.phys);
+       if (!dma_buf->dbuf.virt) {
+               pci_pool_free(phba->lpfc_hrb_pool, dma_buf->hbuf.virt,
+                             dma_buf->hbuf.phys);
+               kfree(dma_buf);
+               return NULL;
+       }
+       dma_buf->size = LPFC_BPL_SIZE;
+       return dma_buf;
+}
+
+/**
+ * lpfc_sli4_rb_free - Frees a receive buffer
+ * @phba: HBA buffer was allocated for
+ * @dmab: DMA buffer container returned by lpfc_sli4_rb_alloc
+ *
+ * Description: Frees both the container and the DMA-mapped buffers returned by
+ * lpfc_sli4_rb_alloc.
+ *
+ * Notes: Can be called with or without locks held.
+ *
+ * Returns: None
+ **/
+void
+lpfc_sli4_rb_free(struct lpfc_hba *phba, struct hbq_dmabuf *dmab)
+{
+       pci_pool_free(phba->lpfc_hrb_pool, dmab->hbuf.virt, dmab->hbuf.phys);
+       pci_pool_free(phba->lpfc_drb_pool, dmab->dbuf.virt, dmab->dbuf.phys);
+       kfree(dmab);
+       return;
+}
+
 /**
  * lpfc_in_buf_free - Free a DMA buffer
  * @phba: HBA buffer is associated with
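
The allocator/free pair above is deliberately symmetric: lpfc_sli4_rb_alloc() draws the header buffer from lpfc_hrb_pool and the data buffer from lpfc_drb_pool, unwinding the first allocation if the second fails, and lpfc_sli4_rb_free() gives both back. A minimal caller sketch, assuming the two PCI pools were created during adapter attach (the receive-queue posting step is elided):

        struct hbq_dmabuf *rb;

        rb = lpfc_sli4_rb_alloc(phba);  /* process context, no locks held */
        if (!rb)
                return -ENOMEM;         /* header or data pool exhausted */

        /* ... post rb->hbuf.phys and rb->dbuf.phys to the receive queue ... */

        lpfc_sli4_rb_free(phba, rb);    /* may be called with locks held */
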
index 08cdc77af41c12f42f34ab2da799f1f678811d7b..09f659f77bb3f2b399850215319eb9121489d181 100644 (file)
@@ -1,7 +1,7 @@
  /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -361,7 +363,7 @@ lpfc_rcv_plogi(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
        if (!mbox)
                goto out;
 
-       rc = lpfc_reg_login(phba, vport->vpi, icmd->un.rcvels.remoteID,
+       rc = lpfc_reg_rpi(phba, vport->vpi, icmd->un.rcvels.remoteID,
                            (uint8_t *) sp, mbox, 0);
        if (rc) {
                mempool_free(mbox, phba->mbox_mem_pool);
@@ -495,11 +497,19 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
                lpfc_els_rsp_acc(vport, ELS_CMD_PRLO, cmdiocb, ndlp, NULL);
        else
                lpfc_els_rsp_acc(vport, ELS_CMD_ACC, cmdiocb, ndlp, NULL);
+       if ((ndlp->nlp_type & NLP_FABRIC) &&
+               vport->port_type == LPFC_NPIV_PORT) {
+               lpfc_linkdown_port(vport);
+               mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
+               spin_lock_irq(shost->host_lock);
+               ndlp->nlp_flag |= NLP_DELAY_TMO;
+               spin_unlock_irq(shost->host_lock);
 
-       if ((!(ndlp->nlp_type & NLP_FABRIC) &&
-            ((ndlp->nlp_type & NLP_FCP_TARGET) ||
-             !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
-           (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
+               ndlp->nlp_last_elscmd = ELS_CMD_FDISC;
+       } else if ((!(ndlp->nlp_type & NLP_FABRIC) &&
+               ((ndlp->nlp_type & NLP_FCP_TARGET) ||
+               !(ndlp->nlp_type & NLP_FCP_INITIATOR))) ||
+               (ndlp->nlp_state == NLP_STE_ADISC_ISSUE)) {
                /* Only try to re-login if this is NOT a Fabric Node */
                mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ * 1);
                spin_lock_irq(shost->host_lock);
@@ -567,7 +577,7 @@ lpfc_disc_set_adisc(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp)
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
 
-       if (!ndlp->nlp_rpi) {
+       if (!(ndlp->nlp_flag & NLP_RPI_VALID)) {
                ndlp->nlp_flag &= ~NLP_NPR_ADISC;
                return 0;
        }
@@ -857,7 +867,7 @@ lpfc_cmpl_plogi_plogi_issue(struct lpfc_vport *vport,
 
        lpfc_unreg_rpi(vport, ndlp);
 
-       if (lpfc_reg_login(phba, vport->vpi, irsp->un.elsreq64.remoteID,
+       if (lpfc_reg_rpi(phba, vport->vpi, irsp->un.elsreq64.remoteID,
                           (uint8_t *) sp, mbox, 0) == 0) {
                switch (ndlp->nlp_DID) {
                case NameServer_DID:
@@ -1068,6 +1078,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
        struct lpfc_iocbq *cmdiocb, *rspiocb;
        IOCB_t *irsp;
        ADISC *ap;
+       int rc;
 
        cmdiocb = (struct lpfc_iocbq *) arg;
        rspiocb = cmdiocb->context_un.rsp_iocb;
@@ -1093,6 +1104,15 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
                return ndlp->nlp_state;
        }
 
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               rc = lpfc_sli4_resume_rpi(ndlp);
+               if (rc) {
+                       /* Stay in state and retry. */
+                       ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
+                       return ndlp->nlp_state;
+               }
+       }
+
        if (ndlp->nlp_type & NLP_FCP_TARGET) {
                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_MAPPED_NODE);
@@ -1100,6 +1120,7 @@ lpfc_cmpl_adisc_adisc_issue(struct lpfc_vport *vport,
                ndlp->nlp_prev_state = NLP_STE_ADISC_ISSUE;
                lpfc_nlp_set_state(vport, ndlp, NLP_STE_UNMAPPED_NODE);
        }
+
        return ndlp->nlp_state;
 }
 
@@ -1190,7 +1211,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 
        /* cleanup any ndlp on mbox q waiting for reglogin cmpl */
        if ((mb = phba->sli.mbox_active)) {
-               if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+               if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        lpfc_nlp_put(ndlp);
                        mb->context2 = NULL;
@@ -1200,7 +1221,7 @@ lpfc_rcv_logo_reglogin_issue(struct lpfc_vport *vport,
 
        spin_lock_irq(&phba->hbalock);
        list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
-               if ((mb->mb.mbxCommand == MBX_REG_LOGIN64) &&
+               if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
                   (ndlp == (struct lpfc_nodelist *) mb->context2)) {
                        mp = (struct lpfc_dmabuf *) (mb->context1);
                        if (mp) {
@@ -1251,7 +1272,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
 {
        struct Scsi_Host *shost = lpfc_shost_from_vport(vport);
        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-       MAILBOX_t *mb = &pmb->mb;
+       MAILBOX_t *mb = &pmb->u.mb;
        uint32_t did  = mb->un.varWords[1];
 
        if (mb->mbxStatus) {
@@ -1283,6 +1304,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
        }
 
        ndlp->nlp_rpi = mb->un.varWords[0];
+       ndlp->nlp_flag |= NLP_RPI_VALID;
 
        /* Only if we are not a fabric nport do we issue PRLI */
        if (!(ndlp->nlp_type & NLP_FABRIC)) {
@@ -1878,11 +1900,12 @@ lpfc_cmpl_reglogin_npr_node(struct lpfc_vport *vport,
                            void *arg, uint32_t evt)
 {
        LPFC_MBOXQ_t *pmb = (LPFC_MBOXQ_t *) arg;
-       MAILBOX_t    *mb = &pmb->mb;
+       MAILBOX_t    *mb = &pmb->u.mb;
 
-       if (!mb->mbxStatus)
+       if (!mb->mbxStatus) {
                ndlp->nlp_rpi = mb->un.varWords[0];
-       else {
+               ndlp->nlp_flag |= NLP_RPI_VALID;
+       } else {
                if (ndlp->nlp_flag & NLP_NODEV_REMOVE) {
                        lpfc_drop_node(vport, ndlp);
                        return NLP_STE_FREED_NODE;
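
The recurring mb->mb to mb->u.mb conversions in the hunks above track a change to LPFC_MBOXQ_t in lpfc_sli.h that this view does not show: with SLI4 support the embedded mailbox storage becomes a union, so one queue entry can carry either a legacy MAILBOX_t or an SLI4 mailbox queue entry. A sketch of the assumed layout (the authoritative definition is in lpfc_sli.h):

        typedef struct lpfcMboxq {
                union {
                        MAILBOX_t mb;           /* SLI2/SLI3 mailbox command */
                        struct lpfc_mqe mqe;    /* SLI4 mailbox queue entry */
                } u;
                /* ... list head, context pointers, completion handler ... */
        } LPFC_MBOXQ_t;

Hence every pmb->mb.mbxCommand access above becomes pmb->u.mb.mbxCommand.
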
index 8032c5adb6a9b2400c318912a7aa376df512b096..e9fa6762044ae9ef398b27b9e44c51a364338528 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_transport_fc.h>
 
 #include "lpfc_version.h"
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -57,6 +59,8 @@ static char *dif_op_str[] = {
        "SCSI_PROT_READ_CONVERT",
        "SCSI_PROT_WRITE_CONVERT"
 };
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb);
 
 static void
 lpfc_debug_save_data(struct scsi_cmnd *cmnd)
@@ -325,7 +329,7 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
 
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                new_queue_depth =
@@ -379,7 +383,7 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
 
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for(i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                if (vports[i]->cfg_lun_queue_depth <=
@@ -427,7 +431,7 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
 
        vports = lpfc_create_vport_work_array(phba);
        if (vports != NULL)
-               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+               for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++) {
                        shost = lpfc_shost_from_vport(vports[i]);
                        shost_for_each_device(sdev, shost) {
                                rport = starget_to_rport(scsi_target(sdev));
@@ -438,22 +442,23 @@ lpfc_scsi_dev_block(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_new_scsi_buf - Scsi buffer allocator
+ * lpfc_new_scsi_buf_s3 - Scsi buffer allocator for HBA with SLI3 IF spec
  * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
  *
- * This routine allocates a scsi buffer, which contains all the necessary
- * information needed to initiate a SCSI I/O.  The non-DMAable buffer region
- * contains information to build the IOCB.  The DMAable region contains
- * memory for the FCP CMND, FCP RSP, and the initial BPL.  In addition to
- * allocating memory, the FCP CMND and FCP RSP BDEs are setup in the BPL
- * and the BPL BDE is setup in the IOCB.
+ * This routine allocates a scsi buffer for a device with SLI-3 interface spec.
+ * The scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O. The non-DMAable buffer region contains information to build
+ * the IOCB. The DMAable region contains memory for the FCP CMND, FCP RSP,
+ * and the initial BPL. In addition to allocating memory, the FCP CMND and
+ * FCP RSP BDEs are setup in the BPL and the BPL BDE is setup in the IOCB.
  *
  * Return codes:
- *   NULL - Error
- *   Pointer to lpfc_scsi_buf data structure - Success
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
  **/
-static struct lpfc_scsi_buf *
-lpfc_new_scsi_buf(struct lpfc_vport *vport)
+static int
+lpfc_new_scsi_buf_s3(struct lpfc_vport *vport, int num_to_alloc)
 {
        struct lpfc_hba *phba = vport->phba;
        struct lpfc_scsi_buf *psb;
@@ -463,107 +468,401 @@ lpfc_new_scsi_buf(struct lpfc_vport *vport)
        dma_addr_t pdma_phys_fcp_rsp;
        dma_addr_t pdma_phys_bpl;
        uint16_t iotag;
+       int bcnt;
 
-       psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
-       if (!psb)
-               return NULL;
+       for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+               psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+               if (!psb)
+                       break;
+
+               /*
+                * Get memory from the pci pool to map the virt space to pci
+                * bus space for an I/O.  The DMA buffer includes space for the
+                * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+                * necessary to support the sg_tablesize.
+                */
+               psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+                                       GFP_KERNEL, &psb->dma_handle);
+               if (!psb->data) {
+                       kfree(psb);
+                       break;
+               }
+
+               /* Initialize virtual ptrs to dma_buf region. */
+               memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+
+               /* Allocate iotag for psb->cur_iocbq. */
+               iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+               if (iotag == 0) {
+                       pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+                                       psb->data, psb->dma_handle);
+                       kfree(psb);
+                       break;
+               }
+               psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+               psb->fcp_cmnd = psb->data;
+               psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
+               psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp);
+
+               /* Initialize local short-hand pointers. */
+               bpl = psb->fcp_bpl;
+               pdma_phys_fcp_cmd = psb->dma_handle;
+               pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
+               pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
+                       sizeof(struct fcp_rsp);
+
+               /*
+                * The first two bdes are the FCP_CMD and FCP_RSP. The balance
+                * are sg list bdes.  Initialize the first two and leave the
+                * rest for queuecommand.
+                */
+               bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
+               bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
+               bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
+               bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+               bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
+
+               /* Setup the physical region for the FCP RSP */
+               bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
+               bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
+               bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
+               bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+               bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+
+               /*
+                * Since the IOCB for the FCP I/O is built into this
+                * lpfc_scsi_buf, initialize it with all known data now.
+                */
+               iocb = &psb->cur_iocbq.iocb;
+               iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+               if ((phba->sli_rev == 3) &&
+                               !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+                       /* fill in immediate fcp command BDE */
+                       iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+                       iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
+                       iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
+                                       unsli3.fcp_ext.icd);
+                       iocb->un.fcpi64.bdl.addrHigh = 0;
+                       iocb->ulpBdeCount = 0;
+                       iocb->ulpLe = 0;
+                       /* fill in response BDE */
+                       iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags =
+                                                       BUFF_TYPE_BDE_64;
+                       iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
+                               sizeof(struct fcp_rsp);
+                       iocb->unsli3.fcp_ext.rbde.addrLow =
+                               putPaddrLow(pdma_phys_fcp_rsp);
+                       iocb->unsli3.fcp_ext.rbde.addrHigh =
+                               putPaddrHigh(pdma_phys_fcp_rsp);
+               } else {
+                       iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+                       iocb->un.fcpi64.bdl.bdeSize =
+                                       (2 * sizeof(struct ulp_bde64));
+                       iocb->un.fcpi64.bdl.addrLow =
+                                       putPaddrLow(pdma_phys_bpl);
+                       iocb->un.fcpi64.bdl.addrHigh =
+                                       putPaddrHigh(pdma_phys_bpl);
+                       iocb->ulpBdeCount = 1;
+                       iocb->ulpLe = 1;
+               }
+               iocb->ulpClass = CLASS3;
+               psb->status = IOSTAT_SUCCESS;
+               /* Put it back into the SCSI buffer list */
+               lpfc_release_scsi_buf_s4(phba, psb);
 
-       /*
-        * Get memory from the pci pool to map the virt space to pci bus space
-        * for an I/O.  The DMA buffer includes space for the struct fcp_cmnd,
-        * struct fcp_rsp and the number of bde's necessary to support the
-        * sg_tablesize.
-        */
-       psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool, GFP_KERNEL,
-                                                       &psb->dma_handle);
-       if (!psb->data) {
-               kfree(psb);
-               return NULL;
        }
 
-       /* Initialize virtual ptrs to dma_buf region. */
-       memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
+       return bcnt;
+}
 
-       /* Allocate iotag for psb->cur_iocbq. */
-       iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
-       if (iotag == 0) {
-               pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
-                             psb->data, psb->dma_handle);
-               kfree (psb);
-               return NULL;
+/**
+ * lpfc_sli4_fcp_xri_aborted - Fast-path process of fcp xri abort
+ * @phba: pointer to lpfc hba data structure.
+ * @axri: pointer to the fcp xri abort wcqe structure.
+ *
+ * This routine is invoked by the worker thread to process a SLI4 fast-path
+ * FCP aborted xri.
+ **/
+void
+lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *phba,
+                         struct sli4_wcqe_xri_aborted *axri)
+{
+       uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
+       struct lpfc_scsi_buf *psb, *next_psb;
+       unsigned long iflag = 0;
+
+       spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+       list_for_each_entry_safe(psb, next_psb,
+               &phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
+               if (psb->cur_iocbq.sli4_xritag == xri) {
+                       list_del(&psb->list);
+                       psb->status = IOSTAT_SUCCESS;
+                       spin_unlock_irqrestore(
+                               &phba->sli4_hba.abts_scsi_buf_list_lock,
+                               iflag);
+                       lpfc_release_scsi_buf_s4(phba, psb);
+                       return;
+               }
+       }
+       spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+                               iflag);
+}
+
+/**
+ * lpfc_sli4_repost_scsi_sgl_list - Repost the scsi buffer sgl pages as a block
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine walks the list of scsi buffers that have been allocated and
+ * reposts them to the HBA by using SGL block post. This is needed after a
+ * pci_function_reset/warm_start or start. The lpfc_hba_down_post_s4 routine
+ * is responsible for moving all scsi buffers on the lpfc_abts_scsi_sgl_list
+ * to the lpfc_scsi_buf_list. If the repost fails, reject all scsi buffers.
+ *
+ * Returns: 0 = success, non-zero failure.
+ **/
+int
+lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *phba)
+{
+       struct lpfc_scsi_buf *psb;
+       int index, status, bcnt = 0, rcnt = 0, rc = 0;
+       LIST_HEAD(sblist);
+
+       for (index = 0; index < phba->sli4_hba.scsi_xri_cnt; index++) {
+               psb = phba->sli4_hba.lpfc_scsi_psb_array[index];
+               if (psb) {
+                       /* Remove from SCSI buffer list */
+                       list_del(&psb->list);
+                       /* Add it to a local SCSI buffer list */
+                       list_add_tail(&psb->list, &sblist);
+                       if (++rcnt == LPFC_NEMBED_MBOX_SGL_CNT) {
+                               bcnt = rcnt;
+                               rcnt = 0;
+                       }
+               } else
+                       /* A hole present in the XRI array, need to skip */
+                       bcnt = rcnt;
+
+               if (index == phba->sli4_hba.scsi_xri_cnt - 1)
+                       /* End of XRI array for SCSI buffer, complete */
+                       bcnt = rcnt;
+
+               /* Continue until we collect up to a nembed page worth of sgls */
+               if (bcnt == 0)
+                       continue;
+               /* Now, post the SCSI buffer list sgls as a block */
+               status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+               /* Reset SCSI buffer count for next round of posting */
+               bcnt = 0;
+               while (!list_empty(&sblist)) {
+                       list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+                                        list);
+                       if (status) {
+                               /* Put this back on the abort scsi list */
+                               psb->status = IOSTAT_LOCAL_REJECT;
+                               psb->result = IOERR_ABORT_REQUESTED;
+                               rc++;
+                       } else
+                               psb->status = IOSTAT_SUCCESS;
+                       /* Put it back into the SCSI buffer list */
+                       lpfc_release_scsi_buf_s4(phba, psb);
+               }
        }
-       psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+       return rc;
+}
 
-       psb->fcp_cmnd = psb->data;
-       psb->fcp_rsp = psb->data + sizeof(struct fcp_cmnd);
-       psb->fcp_bpl = psb->data + sizeof(struct fcp_cmnd) +
-                                                       sizeof(struct fcp_rsp);
+/**
+ * lpfc_new_scsi_buf_s4 - Scsi buffer allocator for HBA with SLI4 IF spec
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine allocates a scsi buffer for a device with SLI-4 interface spec.
+ * The scsi buffer contains all the necessary information needed to initiate
+ * a SCSI I/O.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static int
+lpfc_new_scsi_buf_s4(struct lpfc_vport *vport, int num_to_alloc)
+{
+       struct lpfc_hba *phba = vport->phba;
+       struct lpfc_scsi_buf *psb;
+       struct sli4_sge *sgl;
+       IOCB_t *iocb;
+       dma_addr_t pdma_phys_fcp_cmd;
+       dma_addr_t pdma_phys_fcp_rsp;
+       dma_addr_t pdma_phys_bpl, pdma_phys_bpl1;
+       uint16_t iotag, last_xritag = NO_XRI;
+       int status = 0, index;
+       int bcnt;
+       int non_sequential_xri = 0;
+       int rc = 0;
+       LIST_HEAD(sblist);
+
+       for (bcnt = 0; bcnt < num_to_alloc; bcnt++) {
+               psb = kzalloc(sizeof(struct lpfc_scsi_buf), GFP_KERNEL);
+               if (!psb)
+                       break;
 
-       /* Initialize local short-hand pointers. */
-       bpl = psb->fcp_bpl;
-       pdma_phys_fcp_cmd = psb->dma_handle;
-       pdma_phys_fcp_rsp = psb->dma_handle + sizeof(struct fcp_cmnd);
-       pdma_phys_bpl = psb->dma_handle + sizeof(struct fcp_cmnd) +
-                       sizeof(struct fcp_rsp);
+               /*
+                * Get memory from the pci pool to map the virt space to pci bus
+                * space for an I/O.  The DMA buffer includes space for the
+                * struct fcp_cmnd, struct fcp_rsp and the number of bde's
+                * necessary to support the sg_tablesize.
+                */
+               psb->data = pci_pool_alloc(phba->lpfc_scsi_dma_buf_pool,
+                                               GFP_KERNEL, &psb->dma_handle);
+               if (!psb->data) {
+                       kfree(psb);
+                       break;
+               }
 
-       /*
-        * The first two bdes are the FCP_CMD and FCP_RSP.  The balance are sg
-        * list bdes.  Initialize the first two and leave the rest for
-        * queuecommand.
-        */
-       bpl[0].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_cmd));
-       bpl[0].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_cmd));
-       bpl[0].tus.f.bdeSize = sizeof(struct fcp_cmnd);
-       bpl[0].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-       bpl[0].tus.w = le32_to_cpu(bpl[0].tus.w);
-
-       /* Setup the physical region for the FCP RSP */
-       bpl[1].addrHigh = le32_to_cpu(putPaddrHigh(pdma_phys_fcp_rsp));
-       bpl[1].addrLow = le32_to_cpu(putPaddrLow(pdma_phys_fcp_rsp));
-       bpl[1].tus.f.bdeSize = sizeof(struct fcp_rsp);
-       bpl[1].tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-       bpl[1].tus.w = le32_to_cpu(bpl[1].tus.w);
+               /* Initialize virtual ptrs to dma_buf region. */
+               memset(psb->data, 0, phba->cfg_sg_dma_buf_size);
 
-       /*
-        * Since the IOCB for the FCP I/O is built into this lpfc_scsi_buf,
-        * initialize it with all known data now.
-        */
-       iocb = &psb->cur_iocbq.iocb;
-       iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
-       if ((phba->sli_rev == 3) &&
-           !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
-               /* fill in immediate fcp command BDE */
-               iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_IMMED;
+               /* Allocate iotag for psb->cur_iocbq. */
+               iotag = lpfc_sli_next_iotag(phba, &psb->cur_iocbq);
+               if (iotag == 0) {
+                       kfree(psb);
+                       break;
+               }
+
+               psb->cur_iocbq.sli4_xritag = lpfc_sli4_next_xritag(phba);
+               if (psb->cur_iocbq.sli4_xritag == NO_XRI) {
+                       pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+                             psb->data, psb->dma_handle);
+                       kfree(psb);
+                       break;
+               }
+               if (last_xritag != NO_XRI
+                       && psb->cur_iocbq.sli4_xritag != (last_xritag+1)) {
+                       non_sequential_xri = 1;
+               } else
+                       list_add_tail(&psb->list, &sblist);
+               last_xritag = psb->cur_iocbq.sli4_xritag;
+
+               index = phba->sli4_hba.scsi_xri_cnt++;
+               psb->cur_iocbq.iocb_flag |= LPFC_IO_FCP;
+
+               psb->fcp_bpl = psb->data;
+               psb->fcp_cmnd = (psb->data + phba->cfg_sg_dma_buf_size)
+                       - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               psb->fcp_rsp = (struct fcp_rsp *)((uint8_t *)psb->fcp_cmnd +
+                                       sizeof(struct fcp_cmnd));
+
+               /* Initialize local short-hand pointers. */
+               sgl = (struct sli4_sge *)psb->fcp_bpl;
+               pdma_phys_bpl = psb->dma_handle;
+               pdma_phys_fcp_cmd =
+                       (psb->dma_handle + phba->cfg_sg_dma_buf_size)
+                        - (sizeof(struct fcp_cmnd) + sizeof(struct fcp_rsp));
+               pdma_phys_fcp_rsp = pdma_phys_fcp_cmd + sizeof(struct fcp_cmnd);
+
+               /*
+                * The first two bdes are the FCP_CMD and FCP_RSP.  The balance
+                * are sg list bdes.  Initialize the first two and leave the
+                * rest for queuecommand.
+                */
+               sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_cmd));
+               sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_cmd));
+               bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_cmnd));
+               bf_set(lpfc_sli4_sge_last, sgl, 0);
+               sgl->word2 = cpu_to_le32(sgl->word2);
+               sgl->word3 = cpu_to_le32(sgl->word3);
+               sgl++;
+
+               /* Setup the physical region for the FCP RSP */
+               sgl->addr_hi = cpu_to_le32(putPaddrHigh(pdma_phys_fcp_rsp));
+               sgl->addr_lo = cpu_to_le32(putPaddrLow(pdma_phys_fcp_rsp));
+               bf_set(lpfc_sli4_sge_len, sgl, sizeof(struct fcp_rsp));
+               bf_set(lpfc_sli4_sge_last, sgl, 1);
+               sgl->word2 = cpu_to_le32(sgl->word2);
+               sgl->word3 = cpu_to_le32(sgl->word3);
+
+               /*
+                * Since the IOCB for the FCP I/O is built into this
+                * lpfc_scsi_buf, initialize it with all known data now.
+                */
+               iocb = &psb->cur_iocbq.iocb;
+               iocb->un.fcpi64.bdl.ulpIoTag32 = 0;
+               iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BDE_64;
+               /* setting the BLP size to 2 * sizeof BDE may not be correct.
+                * We are setting the bpl to point to our sgl. An sgl entry
+                * is 16 bytes; a bpl entry is 12 bytes.
+                */
                iocb->un.fcpi64.bdl.bdeSize = sizeof(struct fcp_cmnd);
-               iocb->un.fcpi64.bdl.addrLow = offsetof(IOCB_t,
-                                                      unsli3.fcp_ext.icd);
-               iocb->un.fcpi64.bdl.addrHigh = 0;
-               iocb->ulpBdeCount = 0;
-               iocb->ulpLe = 0;
-               /* fill in responce BDE */
-               iocb->unsli3.fcp_ext.rbde.tus.f.bdeFlags = BUFF_TYPE_BDE_64;
-               iocb->unsli3.fcp_ext.rbde.tus.f.bdeSize =
-                                               sizeof(struct fcp_rsp);
-               iocb->unsli3.fcp_ext.rbde.addrLow =
-                                               putPaddrLow(pdma_phys_fcp_rsp);
-               iocb->unsli3.fcp_ext.rbde.addrHigh =
-                                               putPaddrHigh(pdma_phys_fcp_rsp);
-       } else {
-               iocb->un.fcpi64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
-               iocb->un.fcpi64.bdl.bdeSize = (2 * sizeof(struct ulp_bde64));
-               iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_bpl);
-               iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_bpl);
+               iocb->un.fcpi64.bdl.addrLow = putPaddrLow(pdma_phys_fcp_cmd);
+               iocb->un.fcpi64.bdl.addrHigh = putPaddrHigh(pdma_phys_fcp_cmd);
                iocb->ulpBdeCount = 1;
                iocb->ulpLe = 1;
+               iocb->ulpClass = CLASS3;
+               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                       pdma_phys_bpl1 = pdma_phys_bpl + SGL_PAGE_SIZE;
+               else
+                       pdma_phys_bpl1 = 0;
+               psb->dma_phys_bpl = pdma_phys_bpl;
+               phba->sli4_hba.lpfc_scsi_psb_array[index] = psb;
+               if (non_sequential_xri) {
+                       status = lpfc_sli4_post_sgl(phba, pdma_phys_bpl,
+                                               pdma_phys_bpl1,
+                                               psb->cur_iocbq.sli4_xritag);
+                       if (status) {
+                               /* Put this back on the abort scsi list */
+                               psb->status = IOSTAT_LOCAL_REJECT;
+                               psb->result = IOERR_ABORT_REQUESTED;
+                               rc++;
+                       } else
+                               psb->status = IOSTAT_SUCCESS;
+                       /* Put it back into the SCSI buffer list */
+                       lpfc_release_scsi_buf_s4(phba, psb);
+                       break;
+               }
+       }
+       if (bcnt) {
+               status = lpfc_sli4_post_scsi_sgl_block(phba, &sblist, bcnt);
+               /* Reset SCSI buffer count for next round of posting */
+               while (!list_empty(&sblist)) {
+                       list_remove_head(&sblist, psb, struct lpfc_scsi_buf,
+                                list);
+                       if (status) {
+                               /* Put this back on the abort scsi list */
+                               psb->status = IOSTAT_LOCAL_REJECT;
+                               psb->result = IOERR_ABORT_REQUESTED;
+                               rc++;
+                       } else
+                               psb->status = IOSTAT_SUCCESS;
+                       /* Put it back into the SCSI buffer list */
+                       lpfc_release_scsi_buf_s4(phba, psb);
+               }
        }
-       iocb->ulpClass = CLASS3;
 
-       return psb;
+       return bcnt + non_sequential_xri - rc;
 }
 
 /**
- * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list list of Hba
- * @phba: The Hba for which this call is being executed.
+ * lpfc_new_scsi_buf - Wrapper function for the scsi buffer allocator
+ * @vport: The virtual port for which this call is being executed.
+ * @num_to_alloc: The requested number of buffers to allocate.
+ *
+ * This routine wraps the actual SCSI buffer allocator function pointer from
+ * the lpfc_hba struct.
+ *
+ * Return codes:
+ *   int - number of scsi buffers that were allocated.
+ *   0 = failure, less than num_to_alloc is a partial failure.
+ **/
+static inline int
+lpfc_new_scsi_buf(struct lpfc_vport *vport, int num_to_alloc)
+{
+       return vport->phba->lpfc_new_scsi_buf(vport, num_to_alloc);
+}
+
+/**
+ * lpfc_get_scsi_buf - Get a scsi buffer from lpfc_scsi_buf_list of the HBA
+ * @phba: The HBA for which this call is being executed.
  *
  * This routine removes a scsi buffer from head of @phba lpfc_scsi_buf_list list
  * and returns to caller.
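
Because the allocators now return a count rather than a single buffer pointer, a caller requests a batch and treats any shortfall as a partial failure rather than a hard error. A condensed caller sketch, assuming num_to_alloc was sized from the configured queue depth (the real caller is lpfc_slave_alloc, updated near the end of this file; its 0708 message is the authoritative log text):

        int num_allocated;

        num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
        if (num_allocated < num_to_alloc)
                /* partial (or total) failure: log and run with what we have */
                lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
                                 "allocated %d of %d command buffers\n",
                                 num_allocated, num_to_alloc);
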
@@ -591,7 +890,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
 }
 
 /**
- * lpfc_release_scsi_buf - Return a scsi buffer back to hba's lpfc_scsi_buf_list
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
  * @phba: The Hba for which this call is being executed.
  * @psb: The scsi buffer which is being released.
  *
@@ -599,7 +898,7 @@ lpfc_get_scsi_buf(struct lpfc_hba * phba)
  * lpfc_scsi_buf_list list.
  **/
 static void
-lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+lpfc_release_scsi_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        unsigned long iflag = 0;
 
@@ -610,21 +909,69 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 }
 
 /**
- * lpfc_scsi_prep_dma_buf - Routine to do DMA mapping for scsi buffer
+ * lpfc_release_scsi_buf_s4 - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list. For SLI4 XRI's are tied to the scsi buffer
+ * and cannot be reused for at least RA_TOV amount of time if it was
+ * aborted.
+ **/
+static void
+lpfc_release_scsi_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+       unsigned long iflag = 0;
+
+       if (psb->status == IOSTAT_LOCAL_REJECT
+               && psb->result == IOERR_ABORT_REQUESTED) {
+               spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock,
+                                       iflag);
+               psb->pCmd = NULL;
+               list_add_tail(&psb->list,
+                       &phba->sli4_hba.lpfc_abts_scsi_buf_list);
+               spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
+                                       iflag);
+       } else {
+
+               spin_lock_irqsave(&phba->scsi_buf_list_lock, iflag);
+               psb->pCmd = NULL;
+               list_add_tail(&psb->list, &phba->lpfc_scsi_buf_list);
+               spin_unlock_irqrestore(&phba->scsi_buf_list_lock, iflag);
+       }
+}
+
+/**
+ * lpfc_release_scsi_buf - Return a scsi buffer back to hba scsi buf list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is being released.
+ *
+ * This routine releases @psb scsi buffer by adding it to tail of @phba
+ * lpfc_scsi_buf_list list.
+ **/
+static void
+lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+
+       phba->lpfc_release_scsi_buf(phba, psb);
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
  * @lpfc_cmd: The scsi buffer which is going to be mapped.
  *
  * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
- * field of @lpfc_cmd. This routine scans through sg elements and format the
- * bdea. This routine also initializes all IOCB fields which are dependent on
- * scsi command request buffer.
+ * field of @lpfc_cmd for device with SLI-3 interface spec. This routine scans
+ * through sg elements and formats the bdes. This routine also initializes all
+ * IOCB fields which are dependent on scsi command request buffer.
  *
  * Return codes:
  *   1 - Error
  *   0 - Success
  **/
 static int
-lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
        struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
        struct scatterlist *sgel = NULL;
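
lpfc_release_scsi_buf_s4() above is the other half of the XRI-abort handling introduced earlier in this file: a buffer whose command was aborted is parked on lpfc_abts_scsi_buf_list and is only recycled once lpfc_sli4_fcp_xri_aborted() matches its XRI in the abort WCQE, which enforces the RA_TOV quarantine the kernel-doc describes. A condensed sketch of that round trip, with the status fields set the way the two routines expect (assumed flow):

        /* completion path: the command came back aborted */
        psb->status = IOSTAT_LOCAL_REJECT;
        psb->result = IOERR_ABORT_REQUESTED;
        lpfc_release_scsi_buf_s4(phba, psb);    /* -> lpfc_abts_scsi_buf_list */

        /* later, from the worker thread, on the XRI-abort WCQE: */
        lpfc_sli4_fcp_xri_aborted(phba, axri);  /* XRI match -> free list */
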
@@ -1411,6 +1758,133 @@ out:
        return ret;
 }
 
+/**
+ * lpfc_scsi_prep_dma_buf_s4 - DMA mapping for scsi buffer to SLI4 IF spec
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine does the pci dma mapping for scatter-gather list of scsi cmnd
+ * field of @lpfc_cmd for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ *     1 - Error
+ *     0 - Success
+ **/
+static int
+lpfc_scsi_prep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       struct scsi_cmnd *scsi_cmnd = lpfc_cmd->pCmd;
+       struct scatterlist *sgel = NULL;
+       struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
+       struct sli4_sge *sgl = (struct sli4_sge *)lpfc_cmd->fcp_bpl;
+       IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
+       dma_addr_t physaddr;
+       uint32_t num_bde = 0;
+       uint32_t dma_len;
+       uint32_t dma_offset = 0;
+       int nseg;
+
+       /*
+        * There are three possibilities here - use scatter-gather segment, use
+        * the single mapping, or neither.  Start the lpfc command prep by
+        * bumping the bpl beyond the fcp_cmnd and fcp_rsp regions to the first
+        * data bde entry.
+        */
+       if (scsi_sg_count(scsi_cmnd)) {
+               /*
+                * The driver stores the segment count returned from pci_map_sg
+                * because this is a count of dma-mappings used to map the use_sg
+                * pages.  They are not guaranteed to be the same for those
+                * architectures that implement an IOMMU.
+                */
+
+               nseg = scsi_dma_map(scsi_cmnd);
+               if (unlikely(!nseg))
+                       return 1;
+               sgl += 1;
+               /* clear the last flag in the fcp_rsp map entry */
+               sgl->word2 = le32_to_cpu(sgl->word2);
+               bf_set(lpfc_sli4_sge_last, sgl, 0);
+               sgl->word2 = cpu_to_le32(sgl->word2);
+               sgl += 1;
+
+               lpfc_cmd->seg_cnt = nseg;
+               if (lpfc_cmd->seg_cnt > phba->cfg_sg_seg_cnt) {
+                       printk(KERN_ERR "%s: Too many sg segments from "
+                              "dma_map_sg.  Config %d, seg_cnt %d\n",
+                              __func__, phba->cfg_sg_seg_cnt,
+                              lpfc_cmd->seg_cnt);
+                       scsi_dma_unmap(scsi_cmnd);
+                       return 1;
+               }
+
+               /*
+                * The driver established a maximum scatter-gather segment count
+                * during probe that limits the number of sg elements in any
+                * single scsi command.  Just run through the seg_cnt and format
+                * the sge's.
+                * When using SLI-3 the driver will try to fit all the BDEs into
+                * the IOCB. If it can't then the BDEs get added to a BPL as it
+                * does for SLI-2 mode.
+                */
+               scsi_for_each_sg(scsi_cmnd, sgel, nseg, num_bde) {
+                       physaddr = sg_dma_address(sgel);
+                       dma_len = sg_dma_len(sgel);
+                       bf_set(lpfc_sli4_sge_len, sgl, sg_dma_len(sgel));
+                       sgl->addr_lo = cpu_to_le32(putPaddrLow(physaddr));
+                       sgl->addr_hi = cpu_to_le32(putPaddrHigh(physaddr));
+                       if ((num_bde + 1) == nseg)
+                               bf_set(lpfc_sli4_sge_last, sgl, 1);
+                       else
+                               bf_set(lpfc_sli4_sge_last, sgl, 0);
+                       bf_set(lpfc_sli4_sge_offset, sgl, dma_offset);
+                       sgl->word2 = cpu_to_le32(sgl->word2);
+                       sgl->word3 = cpu_to_le32(sgl->word3);
+                       dma_offset += dma_len;
+                       sgl++;
+               }
+       } else {
+               sgl += 1;
+               /* clear the last flag in the fcp_rsp map entry */
+               sgl->word2 = le32_to_cpu(sgl->word2);
+               bf_set(lpfc_sli4_sge_last, sgl, 1);
+               sgl->word2 = cpu_to_le32(sgl->word2);
+       }
+
+       /*
+        * Finish initializing those IOCB fields that are dependent on the
+        * scsi_cmnd request_buffer.  Note that for SLI-2 the bdeSize is
+        * explicitly reinitialized.
+        * explicitly reinitialized and all iocb memory resources are reused.
+       fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
+
+       /*
+        * Due to difference in data length between DIF/non-DIF paths,
+        * we need to set word 4 of IOCB here
+        */
+       iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
+       return 0;
+}
+
+/**
+ * lpfc_scsi_prep_dma_buf - Wrapper function for DMA mapping of scsi buffer
+ * @phba: The Hba for which this call is being executed.
+ * @lpfc_cmd: The scsi buffer which is going to be mapped.
+ *
+ * This routine wraps the actual DMA mapping function pointer from the
+ * lpfc_hba struct.
+ *
+ * Return codes:
+ *     1 - Error
+ *     0 - Success
+ **/
+static inline int
+lpfc_scsi_prep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
+{
+       return phba->lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);
+}
+
 /**
  * lpfc_send_scsi_error_event - Posts an event when there is SCSI error
  * @phba: Pointer to hba context object.
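
Routing the mapping step through a function pointer keeps the I/O hot path free of per-command SLI-revision branches: queuecommand simply calls the wrapper and treats a nonzero return as a mapping failure. A condensed sketch of the assumed call site in lpfc_queuecommand (the error label name is an assumption):

        err = lpfc_scsi_prep_dma_buf(phba, lpfc_cmd);   /* dispatches to _s3/_s4 */
        if (err)
                goto out_host_busy_free_buf;    /* could not map the SG list */
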
@@ -1504,15 +1978,15 @@ lpfc_send_scsi_error_event(struct lpfc_hba *phba, struct lpfc_vport *vport,
 }
 
 /**
- * lpfc_scsi_unprep_dma_buf - Routine to un-map DMA mapping of scatter gather
- * @phba: The Hba for which this call is being executed.
+ * lpfc_scsi_unprep_dma_buf_s3 - Un-map DMA mapping of SG-list for SLI3 dev
+ * @phba: The HBA for which this call is being executed.
  * @psb: The scsi buffer which is going to be un-mapped.
  *
  * This routine does DMA un-mapping of scatter gather list of scsi command
- * field of @lpfc_cmd.
+ * field of @lpfc_cmd for device with SLI-3 interface spec.
  **/
 static void
-lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
+lpfc_scsi_unprep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
 {
        /*
         * There are only two special cases to consider.  (1) the scsi command
@@ -1528,6 +2002,36 @@ lpfc_scsi_unprep_dma_buf(struct lpfc_hba * phba, struct lpfc_scsi_buf * psb)
                                psb->pCmd->sc_data_direction);
 }
 
+/**
+ * lpfc_scsi_unprep_dma_buf_s4 - Un-map DMA mapping of SG-list for SLI4 dev
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine does DMA un-mapping of scatter gather list of scsi command
+ * field of @lpfc_cmd for device with SLI-4 interface spec. If we have to
+ * remove the sgl for this scsi buffer then we will do it here. For now
+ * we should be able to just call the sli3 unprep routine.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf_s4(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+       lpfc_scsi_unprep_dma_buf_s3(phba, psb);
+}
+
+/**
+ * lpfc_scsi_unprep_dma_buf - Wrapper function for unmap DMA mapping of SG-list
+ * @phba: The Hba for which this call is being executed.
+ * @psb: The scsi buffer which is going to be un-mapped.
+ *
+ * This routine wraps the actual DMA un-mapping function pointer from the
+ * lpfc_hba struct.
+ **/
+static void
+lpfc_scsi_unprep_dma_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
+{
+       phba->lpfc_scsi_unprep_dma_buf(phba, psb);
+}
+
 /**
  * lpfc_handler_fcp_err - FCP response handler
  * @vport: The virtual port for which this call is being executed.
@@ -1676,7 +2180,7 @@ lpfc_handle_fcp_err(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
  * lpfc_scsi_cmd_iocb_cmpl - Scsi cmnd IOCB completion routine
  * @phba: The Hba for which this call is being executed.
  * @pIocbIn: The command IOCBQ for the scsi cmnd.
- * @pIocbOut: The response IOCBQ for the scsi cmnd .
+ * @pIocbOut: The response IOCBQ for the scsi cmnd.
  *
  * This routine assigns scsi command result by looking into response IOCB
  * status field appropriately. This routine handles QUEUE FULL condition as
@@ -1957,16 +2461,16 @@ lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
 }
 
 /**
- * lpfc_scsi_prep_cmnd -  Routine to convert scsi cmnd to FCP information unit
+ * lpfc_scsi_prep_cmnd_s3 - Convert scsi cmnd to FCP info unit for SLI3 dev
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: The scsi command which needs to send.
  * @pnode: Pointer to lpfc_nodelist.
  *
  * This routine initializes fcp_cmnd and iocb data structure from scsi command
- * to transfer.
+ * to transfer for device with SLI3 interface spec.
  **/
 static void
-lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+lpfc_scsi_prep_cmnd_s3(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                    struct lpfc_nodelist *pnode)
 {
        struct lpfc_hba *phba = vport->phba;
@@ -2013,8 +2517,11 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
        if (scsi_sg_count(scsi_cmnd)) {
                if (datadir == DMA_TO_DEVICE) {
                        iocb_cmd->ulpCommand = CMD_FCP_IWRITE64_CR;
-                       iocb_cmd->un.fcpi.fcpi_parm = 0;
-                       iocb_cmd->ulpPU = 0;
+                       if (phba->sli_rev < LPFC_SLI_REV4) {
+                               iocb_cmd->un.fcpi.fcpi_parm = 0;
+                               iocb_cmd->ulpPU = 0;
+                       } else
+                               iocb_cmd->ulpPU = PARM_READ_CHECK;
                        fcp_cmnd->fcpCntl3 = WRITE_DATA;
                        phba->fc4OutputRequests++;
                } else {
@@ -2051,20 +2558,60 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
 }
 
 /**
- * lpfc_scsi_prep_task_mgmt_cmnd - Convert scsi TM cmnd to FCP information unit
+ * lpfc_scsi_prep_cmnd_s4 - Convert scsi cmnd to FCP info unit for SLI4 dev
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine initializes fcp_cmnd and iocb data structure from scsi command
+ * to transfer for device with SLI4 interface spec.
+ **/
+static void
+lpfc_scsi_prep_cmnd_s4(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+                      struct lpfc_nodelist *pnode)
+{
+       /*
+        * The prep cmnd routines do not touch the sgl or its
+        * entries. We may not have to do anything different.
+        * I will leave this function in place until we can
+        * run some IO through the driver and determine if changes
+        * are needed.
+        */
+       return lpfc_scsi_prep_cmnd_s3(vport, lpfc_cmd, pnode);
+}
+
+/**
+ * lpfc_scsi_prep_cmnd - Wrapper func to convert scsi cmnd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: The scsi command which needs to send.
+ * @pnode: Pointer to lpfc_nodelist.
+ *
+ * This routine wraps the actual convert SCSI cmnd function pointer from
+ * the lpfc_hba struct.
+ **/
+static inline void
+lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
+                   struct lpfc_nodelist *pnode)
+{
+       vport->phba->lpfc_scsi_prep_cmnd(vport, lpfc_cmd, pnode);
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s3 - Convert SLI3 scsi TM cmd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
  * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
  * @lun: Logical unit number.
  * @task_mgmt_cmd: SCSI task management command.
  *
- * This routine creates FCP information unit corresponding to @task_mgmt_cmd.
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-3 interface spec.
  *
  * Return codes:
  *   0 - Error
  *   1 - Success
  **/
 static int
-lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+lpfc_scsi_prep_task_mgmt_cmd_s3(struct lpfc_vport *vport,
                             struct lpfc_scsi_buf *lpfc_cmd,
                             unsigned int lun,
                             uint8_t task_mgmt_cmd)
@@ -2113,6 +2660,107 @@ lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
        return 1;
 }
 
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd_s4 - Convert SLI4 scsi TM cmd to FCP info unit
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine creates FCP information unit corresponding to @task_mgmt_cmd
+ * for device with SLI-4 interface spec.
+ *
+ * Return codes:
+ *     0 - Error
+ *     1 - Success
+ **/
+static int
+lpfc_scsi_prep_task_mgmt_cmd_s4(struct lpfc_vport *vport,
+                               struct lpfc_scsi_buf *lpfc_cmd,
+                               unsigned int lun,
+                               uint8_t task_mgmt_cmd)
+{
+       /*
+        * The prep cmnd routines do not touch the sgl or its
+        * entries. We may not have to do anything different.
+        * I will leave this function in place until we can
+        * run some IO through the driver and determine if changes
+        * are needed.
+        */
+       return lpfc_scsi_prep_task_mgmt_cmd_s3(vport, lpfc_cmd, lun,
+                                               task_mgmt_cmd);
+}
+
+/**
+ * lpfc_scsi_prep_task_mgmt_cmd - Wrapper func to convert scsi TM cmd to FCP info
+ * @vport: The virtual port for which this call is being executed.
+ * @lpfc_cmd: Pointer to lpfc_scsi_buf data structure.
+ * @lun: Logical unit number.
+ * @task_mgmt_cmd: SCSI task management command.
+ *
+ * This routine wraps the actual convert SCSI TM to FCP information unit
+ * function pointer from the lpfc_hba struct.
+ *
+ * Return codes:
+ *     0 - Error
+ *     1 - Success
+ **/
+static inline int
+lpfc_scsi_prep_task_mgmt_cmd(struct lpfc_vport *vport,
+                            struct lpfc_scsi_buf *lpfc_cmd,
+                            unsigned int lun,
+                            uint8_t task_mgmt_cmd)
+{
+       struct lpfc_hba *phba = vport->phba;
+
+       return phba->lpfc_scsi_prep_task_mgmt_cmd(vport, lpfc_cmd, lun,
+                                                 task_mgmt_cmd);
+}
+
+/**
+ * lpfc_scsi_api_table_setup - Set up the scsi api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SCSI interface API function jump table in @phba
+ * struct.
+ *
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_scsi_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+       switch (dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s3;
+               phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s3;
+               phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s3;
+               phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s3;
+               phba->lpfc_scsi_prep_task_mgmt_cmd =
+                                       lpfc_scsi_prep_task_mgmt_cmd_s3;
+               phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s3;
+               break;
+       case LPFC_PCI_DEV_OC:
+               phba->lpfc_new_scsi_buf = lpfc_new_scsi_buf_s4;
+               phba->lpfc_scsi_prep_dma_buf = lpfc_scsi_prep_dma_buf_s4;
+               phba->lpfc_scsi_prep_cmnd = lpfc_scsi_prep_cmnd_s4;
+               phba->lpfc_scsi_unprep_dma_buf = lpfc_scsi_unprep_dma_buf_s4;
+               phba->lpfc_scsi_prep_task_mgmt_cmd =
+                                       lpfc_scsi_prep_task_mgmt_cmd_s4;
+               phba->lpfc_release_scsi_buf = lpfc_release_scsi_buf_s4;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1418 Invalid HBA PCI-device group: 0x%x\n",
+                               dev_grp);
+               return -ENODEV;
+               break;
+       }
+       phba->lpfc_get_scsi_buf = lpfc_get_scsi_buf;
+       phba->lpfc_rampdown_queue_depth = lpfc_rampdown_queue_depth;
+       return 0;
+}
+
 /**
  * lpfc_taskmgmt_def_cmpl - IOCB completion routine for task management command
  * @phba: The Hba for which this call is being executed.
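
lpfc_scsi_api_table_setup() is meant to run once per HBA during probe, before any SCSI buffers are allocated; from then on every per-I/O routine above dispatches through the populated pointers. A probe-time sketch (the dev_grp derivation is an assumption; the real selection logic lives in lpfc_init.c):

        /* during PCI probe, once the device group is known */
        uint8_t dev_grp = LPFC_PCI_DEV_OC;      /* SLI4 HBA; LPFC_PCI_DEV_LP for SLI3 */

        if (lpfc_scsi_api_table_setup(phba, dev_grp))
                return -ENODEV;                 /* unknown PCI device group */

        /* thereafter, for example: */
        phba->lpfc_release_scsi_buf(phba, psb); /* resolves to _s3 or _s4 */
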
@@ -2178,9 +2826,8 @@ lpfc_scsi_tgt_reset(struct lpfc_scsi_buf *lpfc_cmd, struct lpfc_vport *vport,
        lpfc_printf_vlog(vport, KERN_INFO, LOG_FCP,
                         "0702 Issue Target Reset to TGT %d Data: x%x x%x\n",
                         tgt_id, rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
-       status = lpfc_sli_issue_iocb_wait(phba,
-                                      &phba->sli.ring[phba->sli.fcp_ring],
-                                      iocbq, iocbqrsp, lpfc_cmd->timeout);
+       status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
+                                         iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (status != IOCB_SUCCESS) {
                if (status == IOCB_TIMEDOUT) {
                        iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
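
This hunk is the first of several below that make the same mechanical change to the issue path: callers stop digging the ring structure out of phba->sli and instead pass a ring index, letting the SLI layer resolve the ring per SLI revision. A before/after sketch taken from the surrounding hunks:

        /* before: the caller picks the ring structure itself */
        status = lpfc_sli_issue_iocb_wait(phba,
                        &phba->sli.ring[phba->sli.fcp_ring],
                        iocbq, iocbqrsp, lpfc_cmd->timeout);

        /* after: the caller names the ring by index */
        status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
                        iocbq, iocbqrsp, lpfc_cmd->timeout);
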
@@ -2305,7 +2952,6 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
-       struct lpfc_sli   *psli = &phba->sli;
        struct lpfc_rport_data *rdata = cmnd->device->hostdata;
        struct lpfc_nodelist *ndlp = rdata->pnode;
        struct lpfc_scsi_buf *lpfc_cmd;
@@ -2427,7 +3073,7 @@ lpfc_queuecommand(struct scsi_cmnd *cmnd, void (*done) (struct scsi_cmnd *))
        lpfc_scsi_prep_cmnd(vport, lpfc_cmd, ndlp);
 
        atomic_inc(&ndlp->cmd_pending);
-       err = lpfc_sli_issue_iocb(phba, &phba->sli.ring[psli->fcp_ring],
+       err = lpfc_sli_issue_iocb(phba, LPFC_FCP_RING,
                                  &lpfc_cmd->cur_iocbq, SLI_IOCB_RET_IOCB);
        if (err) {
                atomic_dec(&ndlp->cmd_pending);
@@ -2490,7 +3136,6 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        struct Scsi_Host  *shost = cmnd->device->host;
        struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
        struct lpfc_hba   *phba = vport->phba;
-       struct lpfc_sli_ring *pring = &phba->sli.ring[phba->sli.fcp_ring];
        struct lpfc_iocbq *iocb;
        struct lpfc_iocbq *abtsiocb;
        struct lpfc_scsi_buf *lpfc_cmd;
@@ -2531,7 +3176,10 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
        icmd = &abtsiocb->iocb;
        icmd->un.acxri.abortType = ABORT_TYPE_ABTS;
        icmd->un.acxri.abortContextTag = cmd->ulpContext;
-       icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               icmd->un.acxri.abortIoTag = iocb->sli4_xritag;
+       else
+               icmd->un.acxri.abortIoTag = cmd->ulpIoTag;
 
        icmd->ulpLe = 1;
        icmd->ulpClass = cmd->ulpClass;
@@ -2542,7 +3190,8 @@ lpfc_abort_handler(struct scsi_cmnd *cmnd)
 
        abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
        abtsiocb->vport = vport;
-       if (lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0) == IOCB_ERROR) {
+       if (lpfc_sli_issue_iocb(phba, LPFC_FCP_RING, abtsiocb, 0) ==
+           IOCB_ERROR) {
                lpfc_sli_release_iocbq(phba, abtsiocb);
                ret = FAILED;
                goto out;
@@ -2668,8 +3317,7 @@ lpfc_device_reset_handler(struct scsi_cmnd *cmnd)
                         "0703 Issue target reset to TGT %d LUN %d "
                         "rpi x%x nlp_flag x%x\n", cmnd->device->id,
                         cmnd->device->lun, pnode->nlp_rpi, pnode->nlp_flag);
-       status = lpfc_sli_issue_iocb_wait(phba,
-                                         &phba->sli.ring[phba->sli.fcp_ring],
+       status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
                                          iocbq, iocbqrsp, lpfc_cmd->timeout);
        if (status == IOCB_TIMEDOUT) {
                iocbq->iocb_cmpl = lpfc_tskmgmt_def_cmpl;
@@ -2825,11 +3473,10 @@ lpfc_slave_alloc(struct scsi_device *sdev)
 {
        struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
        struct lpfc_hba   *phba = vport->phba;
-       struct lpfc_scsi_buf *scsi_buf = NULL;
        struct fc_rport *rport = starget_to_rport(scsi_target(sdev));
-       uint32_t total = 0, i;
+       uint32_t total = 0;
        uint32_t num_to_alloc = 0;
-       unsigned long flags;
+       int num_allocated = 0;
 
        if (!rport || fc_remote_port_chkready(rport))
                return -ENXIO;
@@ -2863,20 +3510,13 @@ lpfc_slave_alloc(struct scsi_device *sdev)
                                 (phba->cfg_hba_queue_depth - total));
                num_to_alloc = phba->cfg_hba_queue_depth - total;
        }
-
-       for (i = 0; i < num_to_alloc; i++) {
-               scsi_buf = lpfc_new_scsi_buf(vport);
-               if (!scsi_buf) {
-                       lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
-                                        "0706 Failed to allocate "
-                                        "command buffer\n");
-                       break;
-               }
-
-               spin_lock_irqsave(&phba->scsi_buf_list_lock, flags);
-               phba->total_scsi_bufs++;
-               list_add_tail(&scsi_buf->list, &phba->lpfc_scsi_buf_list);
-               spin_unlock_irqrestore(&phba->scsi_buf_list_lock, flags);
+       num_allocated = lpfc_new_scsi_buf(vport, num_to_alloc);
+       if (num_to_alloc != num_allocated) {
+               lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
+                                "0708 Allocation request of %d "
+                                "command buffers did not succeed.  "
+                                "Allocated %d buffers.\n",
+                                num_to_alloc, num_allocated);
        }
        return 0;
 }
index c7c440d5fa29668010bc53df87e0c2901328bdb7..65dfc8bd5b49ae385e6d15c51909c245aed71da0 100644 (file)
@@ -140,6 +140,8 @@ struct lpfc_scsi_buf {
        struct fcp_rsp *fcp_rsp;
        struct ulp_bde64 *fcp_bpl;
 
+       dma_addr_t dma_phys_bpl;
+
        /* cur_iocbq has phys of the dma-able buffer.
         * Iotag is in here
         */
index eb5c75c45ba4aa1e4414fb880101e77edd60a511..ff04daf18f48d1ba4df538638b746c2d87a1236d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************
  * This file is part of the Emulex Linux Device Driver for         *
  * Fibre Channel Host Bus Adapters.                                *
- * Copyright (C) 2004-2008 Emulex.  All rights reserved.           *
+ * Copyright (C) 2004-2009 Emulex.  All rights reserved.           *
  * EMULEX and SLI are trademarks of Emulex.                        *
  * www.emulex.com                                                  *
  * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include <scsi/fc/fc_fs.h>
 
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
 #include "lpfc_logmsg.h"
 #include "lpfc_compat.h"
 #include "lpfc_debugfs.h"
-
-/*
- * Define macro to log: Mailbox command x%x cannot issue Data
- * This allows multiple uses of lpfc_msgBlk0311
- * w/o perturbing log msg utility.
- */
-#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag) \
-                       lpfc_printf_log(phba, \
-                               KERN_INFO, \
-                               LOG_MBOX | LOG_SLI, \
-                               "(%d):0311 Mailbox command x%x cannot " \
-                               "issue Data: x%x x%x x%x\n", \
-                               pmbox->vport ? pmbox->vport->vpi : 0, \
-                               pmbox->mb.mbxCommand,           \
-                               phba->pport->port_state,        \
-                               psli->sli_flag, \
-                               flag)
-
+#include "lpfc_vport.h"
 
 /* There are only four IOCB completion types. */
 typedef enum _lpfc_iocb_type {
@@ -67,6 +53,350 @@ typedef enum _lpfc_iocb_type {
        LPFC_ABORT_IOCB
 } lpfc_iocb_type;
 
+
+/* Provide function prototypes local to this module. */
+static int lpfc_sli_issue_mbox_s4(struct lpfc_hba *, LPFC_MBOXQ_t *,
+                                 uint32_t);
+static int lpfc_sli4_read_rev(struct lpfc_hba *, LPFC_MBOXQ_t *,
+                           uint8_t *, uint32_t *);
+
+static IOCB_t *
+lpfc_get_iocb_from_iocbq(struct lpfc_iocbq *iocbq)
+{
+       return &iocbq->iocb;
+}
+
+/**
+ * lpfc_sli4_wq_put - Put a Work Queue Entry on a Work Queue
+ * @q: The Work Queue to operate on.
+ * @wqe: The Work Queue Entry to put on the Work queue.
+ *
+ * This routine will copy the contents of @wqe to the next available entry on
+ * the @q. This function will then ring the Work Queue Doorbell to signal the
+ * HBA to start processing the Work Queue Entry. This function returns 0 if
+ * successful. If no entries are available on @q then this function will return
+ * -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_wq_put(struct lpfc_queue *q, union lpfc_wqe *wqe)
+{
+       union lpfc_wqe *temp_wqe = q->qe[q->host_index].wqe;
+       struct lpfc_register doorbell;
+       uint32_t host_index;
+
+       /* If the host has not yet processed the next entry then we are done */
+       if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+               return -ENOMEM;
+       /* set consumption flag every once in a while */
+       if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
+               bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+
+       lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
+
+       /* Update the host index before invoking device */
+       host_index = q->host_index;
+       q->host_index = ((q->host_index + 1) % q->entry_count);
+
+       /* Ring Doorbell */
+       doorbell.word0 = 0;
+       bf_set(lpfc_wq_doorbell_num_posted, &doorbell, 1);
+       bf_set(lpfc_wq_doorbell_index, &doorbell, host_index);
+       bf_set(lpfc_wq_doorbell_id, &doorbell, q->queue_id);
+       writel(doorbell.word0, q->phba->sli4_hba.WQDBregaddr);
+       readl(q->phba->sli4_hba.WQDBregaddr); /* Flush */
+
+       return 0;
+}
+
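The full-queue test above is the classic circular-buffer convention in which one slot is left unused so that full and empty states stay distinguishable. A minimal sketch under that assumption (hypothetical struct ring, not lpfc code):

#include <assert.h>

struct ring {
	unsigned host_index;	/* next slot the host will fill */
	unsigned hba_index;	/* slot the HBA has consumed up to */
	unsigned entry_count;
};

/* Full when advancing host_index would collide with hba_index. */
static int ring_full(const struct ring *q)
{
	return ((q->host_index + 1) % q->entry_count) == q->hba_index;
}

static void ring_advance(struct ring *q)
{
	q->host_index = (q->host_index + 1) % q->entry_count;
}

int main(void)
{
	struct ring q = { .host_index = 0, .hba_index = 0, .entry_count = 4 };
	int posted = 0;

	while (!ring_full(&q)) {	/* one slot is sacrificed ...        */
		ring_advance(&q);
		posted++;
	}
	assert(posted == 3);		/* ... so usable capacity is count-1 */
	return 0;
}
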
+/**
+ * lpfc_sli4_wq_release - Updates internal hba index for WQ
+ * @q: The Work Queue to operate on.
+ * @index: The index to advance the hba index to.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * Work Queue Entries by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_wq_release(struct lpfc_queue *q, uint32_t index)
+{
+       uint32_t released = 0;
+
+       if (q->hba_index == index)
+               return 0;
+       do {
+               q->hba_index = ((q->hba_index + 1) % q->entry_count);
+               released++;
+       } while (q->hba_index != index);
+       return released;
+}
+
+/**
+ * lpfc_sli4_mq_put - Put a Mailbox Queue Entry on a Mailbox Queue
+ * @q: The Mailbox Queue to operate on.
+ * @mqe: The Mailbox Queue Entry to put on the Mailbox queue.
+ *
+ * This routine will copy the contents of @mqe to the next available entry on
+ * the @q. This function will then ring the Mailbox Queue Doorbell to signal
+ * the HBA to start processing the Mailbox Queue Entry. This function returns
+ * 0 if successful. If no entries are available on @q then this function will
+ * return -ENOMEM.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static uint32_t
+lpfc_sli4_mq_put(struct lpfc_queue *q, struct lpfc_mqe *mqe)
+{
+       struct lpfc_mqe *temp_mqe = q->qe[q->host_index].mqe;
+       struct lpfc_register doorbell;
+       uint32_t host_index;
+
+       /* If the host has not yet processed the next entry then we are done */
+       if (((q->host_index + 1) % q->entry_count) == q->hba_index)
+               return -ENOMEM;
+       lpfc_sli_pcimem_bcopy(mqe, temp_mqe, q->entry_size);
+       /* Save off the mailbox pointer for completion */
+       q->phba->mbox = (MAILBOX_t *)temp_mqe;
+
+       /* Update the host index before invoking device */
+       host_index = q->host_index;
+       q->host_index = ((q->host_index + 1) % q->entry_count);
+
+       /* Ring Doorbell */
+       doorbell.word0 = 0;
+       bf_set(lpfc_mq_doorbell_num_posted, &doorbell, 1);
+       bf_set(lpfc_mq_doorbell_id, &doorbell, q->queue_id);
+       writel(doorbell.word0, q->phba->sli4_hba.MQDBregaddr);
+       readl(q->phba->sli4_hba.MQDBregaddr); /* Flush */
+       return 0;
+}
+
+/**
+ * lpfc_sli4_mq_release - Updates internal hba index for MQ
+ * @q: The Mailbox Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * a Mailbox Queue Entry by the HBA. When the HBA indicates that it has consumed
+ * an entry the host calls this function to update the queue's internal
+ * pointers. This routine returns the number of entries that were consumed by
+ * the HBA.
+ **/
+static uint32_t
+lpfc_sli4_mq_release(struct lpfc_queue *q)
+{
+       /* Clear the mailbox pointer for completion */
+       q->phba->mbox = NULL;
+       q->hba_index = ((q->hba_index + 1) % q->entry_count);
+       return 1;
+}
+
+/**
+ * lpfc_sli4_eq_get - Gets the next valid EQE from an EQ
+ * @q: The Event Queue to get the first valid EQE from
+ *
+ * This routine will get the first valid Event Queue Entry from @q, update
+ * the queue's internal hba index, and return the EQE. If no valid EQEs are in
+ * the Queue (no more work to do), or the Queue is full of EQEs that have been
+ * processed but not popped back to the HBA, then this routine will return NULL.
+ **/
+static struct lpfc_eqe *
+lpfc_sli4_eq_get(struct lpfc_queue *q)
+{
+       struct lpfc_eqe *eqe = q->qe[q->hba_index].eqe;
+
+       /* If the next EQE is not valid then we are done */
+       if (!bf_get(lpfc_eqe_valid, eqe))
+               return NULL;
+       /* If the host has not yet processed the next entry then we are done */
+       if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+               return NULL;
+
+       q->hba_index = ((q->hba_index + 1) % q->entry_count);
+       return eqe;
+}
+
+/**
+ * lpfc_sli4_eq_release - Indicates the host has finished processing an EQ
+ * @q: The Event Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this EQ.
+ *
+ * This routine will mark all Event Queue Entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the EQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of EQEs that were popped.
+ **/
+uint32_t
+lpfc_sli4_eq_release(struct lpfc_queue *q, bool arm)
+{
+       uint32_t released = 0;
+       struct lpfc_eqe *temp_eqe;
+       struct lpfc_register doorbell;
+
+       /* while there are valid entries */
+       while (q->hba_index != q->host_index) {
+               temp_eqe = q->qe[q->host_index].eqe;
+               bf_set(lpfc_eqe_valid, temp_eqe, 0);
+               released++;
+               q->host_index = ((q->host_index + 1) % q->entry_count);
+       }
+       if (unlikely(released == 0 && !arm))
+               return 0;
+
+       /* ring doorbell for number popped */
+       doorbell.word0 = 0;
+       if (arm) {
+               bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+               bf_set(lpfc_eqcq_doorbell_eqci, &doorbell, 1);
+       }
+       bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+       bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_EVENT);
+       bf_set(lpfc_eqcq_doorbell_eqid, &doorbell, q->queue_id);
+       writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+       return released;
+}
+
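lpfc_sli4_eq_release above and lpfc_sli4_cq_release below share the same release loop: walk host_index forward to hba_index, clearing each entry's valid bit, and report the count through the doorbell. A reduced sketch of that loop with the valid bit modeled as a plain flag (hypothetical types, not lpfc code):

#include <assert.h>

struct entry { int valid; };

struct queue {
	struct entry e[8];
	unsigned host_index, hba_index, entry_count;
};

/* Clear the valid bit on every entry the host has finished with and
 * return the count that the doorbell would report to the HBA. */
static unsigned release_entries(struct queue *q)
{
	unsigned released = 0;

	while (q->hba_index != q->host_index) {
		q->e[q->host_index].valid = 0;
		released++;
		q->host_index = (q->host_index + 1) % q->entry_count;
	}
	return released;
}

int main(void)
{
	struct queue q = { .host_index = 2, .hba_index = 5, .entry_count = 8 };

	assert(release_entries(&q) == 3);	/* slots 2, 3 and 4 handed back */
	assert(q.host_index == 5);
	return 0;
}
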
+/**
+ * lpfc_sli4_cq_get - Gets the next valid CQE from a CQ
+ * @q: The Completion Queue to get the first valid CQE from
+ *
+ * This routine will get the first valid Completion Queue Entry from @q, update
+ * the queue's internal hba index, and return the CQE. If no valid CQEs are in
+ * the Queue (no more work to do), or the Queue is full of CQEs that have been
+ * processed but not popped back to the HBA, then this routine will return NULL.
+ **/
+static struct lpfc_cqe *
+lpfc_sli4_cq_get(struct lpfc_queue *q)
+{
+       struct lpfc_cqe *cqe;
+
+       /* If the next CQE is not valid then we are done */
+       if (!bf_get(lpfc_cqe_valid, q->qe[q->hba_index].cqe))
+               return NULL;
+       /* If the host has not yet processed the next entry then we are done */
+       if (((q->hba_index + 1) % q->entry_count) == q->host_index)
+               return NULL;
+
+       cqe = q->qe[q->hba_index].cqe;
+       q->hba_index = ((q->hba_index + 1) % q->entry_count);
+       return cqe;
+}
+
+/**
+ * lpfc_sli4_cq_release - Indicates the host has finished processing a CQ
+ * @q: The Completion Queue that the host has completed processing for.
+ * @arm: Indicates whether the host wants to arm this CQ.
+ *
+ * This routine will mark all Completion queue entries on @q, from the last
+ * known completed entry to the last entry that was processed, as completed
+ * by clearing the valid bit for each completion queue entry. Then it will
+ * notify the HBA, by ringing the doorbell, that the CQEs have been processed.
+ * The internal host index in the @q will be updated by this routine to indicate
+ * that the host has finished processing the entries. The @arm parameter
+ * indicates that the queue should be rearmed when ringing the doorbell.
+ *
+ * This function will return the number of CQEs that were released.
+ **/
+uint32_t
+lpfc_sli4_cq_release(struct lpfc_queue *q, bool arm)
+{
+       uint32_t released = 0;
+       struct lpfc_cqe *temp_qe;
+       struct lpfc_register doorbell;
+
+       /* while there are valid entries */
+       while (q->hba_index != q->host_index) {
+               temp_qe = q->qe[q->host_index].cqe;
+               bf_set(lpfc_cqe_valid, temp_qe, 0);
+               released++;
+               q->host_index = ((q->host_index + 1) % q->entry_count);
+       }
+       if (unlikely(released == 0 && !arm))
+               return 0;
+
+       /* ring doorbell for number popped */
+       doorbell.word0 = 0;
+       if (arm)
+               bf_set(lpfc_eqcq_doorbell_arm, &doorbell, 1);
+       bf_set(lpfc_eqcq_doorbell_num_released, &doorbell, released);
+       bf_set(lpfc_eqcq_doorbell_qt, &doorbell, LPFC_QUEUE_TYPE_COMPLETION);
+       bf_set(lpfc_eqcq_doorbell_cqid, &doorbell, q->queue_id);
+       writel(doorbell.word0, q->phba->sli4_hba.EQCQDBregaddr);
+       return released;
+}
+
+/**
+ * lpfc_sli4_rq_put - Put a Receive Buffer Queue Entry on a Receive Queue
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ * @hrqe: The header Receive Queue Entry to put on the header queue.
+ * @drqe: The data Receive Queue Entry to put on the data queue.
+ *
+ * This routine will copy the contents of @hrqe and @drqe to the next
+ * available entries on @hq and @dq, then ring the Receive Queue Doorbell
+ * to signal the HBA to start processing the Receive Queue Entries. This
+ * function returns the index that the hrqe was copied to if successful. If
+ * the queues are full it will return -EBUSY, and -EINVAL if the header and
+ * data queues are mismatched.
+ * The caller is expected to hold the hbalock when calling this routine.
+ **/
+static int
+lpfc_sli4_rq_put(struct lpfc_queue *hq, struct lpfc_queue *dq,
+                struct lpfc_rqe *hrqe, struct lpfc_rqe *drqe)
+{
+       struct lpfc_rqe *temp_hrqe = hq->qe[hq->host_index].rqe;
+       struct lpfc_rqe *temp_drqe = dq->qe[dq->host_index].rqe;
+       struct lpfc_register doorbell;
+       int put_index = hq->host_index;
+
+       if (hq->type != LPFC_HRQ || dq->type != LPFC_DRQ)
+               return -EINVAL;
+       if (hq->host_index != dq->host_index)
+               return -EINVAL;
+       /* If the host has not yet processed the next entry then we are done */
+       if (((hq->host_index + 1) % hq->entry_count) == hq->hba_index)
+               return -EBUSY;
+       lpfc_sli_pcimem_bcopy(hrqe, temp_hrqe, hq->entry_size);
+       lpfc_sli_pcimem_bcopy(drqe, temp_drqe, dq->entry_size);
+
+       /* Update the host index to point to the next slot */
+       hq->host_index = ((hq->host_index + 1) % hq->entry_count);
+       dq->host_index = ((dq->host_index + 1) % dq->entry_count);
+
+       /* Ring The Header Receive Queue Doorbell */
+       if (!(hq->host_index % LPFC_RQ_POST_BATCH)) {
+               doorbell.word0 = 0;
+               bf_set(lpfc_rq_doorbell_num_posted, &doorbell,
+                      LPFC_RQ_POST_BATCH);
+               bf_set(lpfc_rq_doorbell_id, &doorbell, hq->queue_id);
+               writel(doorbell.word0, hq->phba->sli4_hba.RQDBregaddr);
+       }
+       return put_index;
+}
+
+/**
+ * lpfc_sli4_rq_release - Updates internal hba index for RQ
+ * @hq: The Header Receive Queue to operate on.
+ * @dq: The Data Receive Queue to operate on.
+ *
+ * This routine will update the HBA index of a queue to reflect consumption of
+ * one Receive Queue Entry by the HBA. When the HBA indicates that it has
+ * consumed an entry the host calls this function to update the queue's
+ * internal pointers. This routine returns the number of entries that were
+ * consumed by the HBA.
+ **/
+static uint32_t
+lpfc_sli4_rq_release(struct lpfc_queue *hq, struct lpfc_queue *dq)
+{
+       if ((hq->type != LPFC_HRQ) || (dq->type != LPFC_DRQ))
+               return 0;
+       hq->hba_index = ((hq->hba_index + 1) % hq->entry_count);
+       dq->hba_index = ((dq->hba_index + 1) % dq->entry_count);
+       return 1;
+}
+
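Note that lpfc_sli4_rq_put above rings the doorbell only once per LPFC_RQ_POST_BATCH posts rather than per entry, amortizing the MMIO write. A toy sketch of that batching policy, assuming a hypothetical batch size of 8:

#include <stdio.h>

#define RQ_POST_BATCH 8			/* illustrative batch size */

int main(void)
{
	unsigned host_index = 0, entry_count = 64;
	int post;

	for (post = 1; post <= 20; post++) {
		host_index = (host_index + 1) % entry_count;
		/* Ring once per batch, reporting how many were posted. */
		if (!(host_index % RQ_POST_BATCH))
			printf("doorbell: %d entries posted\n", RQ_POST_BATCH);
	}
	return 0;			/* rings after posts 8 and 16 only */
}
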
 /**
  * lpfc_cmd_iocb - Get next command iocb entry in the ring
  * @phba: Pointer to HBA context object.
@@ -120,6 +450,76 @@ __lpfc_sli_get_iocbq(struct lpfc_hba *phba)
        return iocbq;
 }
 
+/**
+ * __lpfc_clear_active_sglq - Remove the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function clears the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns the sglq pointer on success, NULL on failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_clear_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+       uint16_t adj_xri;
+       struct lpfc_sglq *sglq;
+       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+       if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+               return NULL;
+       sglq = phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+       phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = NULL;
+       return sglq;
+}
+
+/**
+ * __lpfc_get_active_sglq - Get the active sglq for this XRI.
+ * @phba: Pointer to HBA context object.
+ * @xritag: XRI value.
+ *
+ * This function returns the sglq pointer from the array of active
+ * sglq's. The xritag that is passed in is used to index into the
+ * array. Before the xritag can be used it needs to be adjusted
+ * by subtracting the xribase.
+ *
+ * Returns the sglq pointer on success, NULL on failure.
+ **/
+static struct lpfc_sglq *
+__lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
+{
+       uint16_t adj_xri;
+       struct lpfc_sglq *sglq;
+       adj_xri = xritag - phba->sli4_hba.max_cfg_param.xri_base;
+       if (adj_xri > phba->sli4_hba.max_cfg_param.max_xri)
+               return NULL;
+       sglq =  phba->sli4_hba.lpfc_sglq_active_list[adj_xri];
+       return sglq;
+}
+
+/**
+ * __lpfc_sli_get_sglq - Allocates an iocb object from sgl pool
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with the hbalock held. It gets a new driver
+ * sglq object from the sglq list. If the list is not empty it returns
+ * a pointer to the newly allocated sglq object, otherwise it returns
+ * NULL.
+ **/
+static struct lpfc_sglq *
+__lpfc_sli_get_sglq(struct lpfc_hba *phba)
+{
+       struct list_head *lpfc_sgl_list = &phba->sli4_hba.lpfc_sgl_list;
+       struct lpfc_sglq *sglq = NULL;
+       uint16_t adj_xri;
+       list_remove_head(lpfc_sgl_list, sglq, struct lpfc_sglq, list);
+       adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
+       phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+       return sglq;
+}
+
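The sglq helpers above all index the active array by the XRI adjusted down by the configured xri_base. A stand-alone sketch of that get-and-clear lookup with hypothetical bounds (XRI_BASE and MAX_XRI are illustrative values, not lpfc's):

#include <assert.h>
#include <stddef.h>

#define XRI_BASE 128			/* illustrative, not lpfc's value */
#define MAX_XRI  256

struct sglq { int dummy; };

static struct sglq *active[MAX_XRI + 1];

/* Look up the sglq for an XRI and clear its slot, as the "clear" above does. */
static struct sglq *clear_active_sglq(unsigned xritag)
{
	unsigned adj_xri = xritag - XRI_BASE;	/* slot 0 == first usable XRI */
	struct sglq *sglq;

	if (adj_xri > MAX_XRI)
		return NULL;
	sglq = active[adj_xri];
	active[adj_xri] = NULL;
	return sglq;
}

int main(void)
{
	struct sglq one;

	active[5] = &one;
	assert(clear_active_sglq(XRI_BASE + 5) == &one);
	assert(active[5] == NULL);		/* the get also cleared the slot */
	return 0;
}
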
 /**
  * lpfc_sli_get_iocbq - Allocates an iocb object from iocb pool
  * @phba: Pointer to HBA context object.
@@ -142,7 +542,7 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
 }
 
 /**
- * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * __lpfc_sli_release_iocbq_s4 - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
  * @iocbq: Pointer to driver iocb object.
  *
@@ -150,9 +550,62 @@ lpfc_sli_get_iocbq(struct lpfc_hba *phba)
  * iocb object to the iocb pool. The iotag in the iocb object
  * does not change for each use of the iocb object. This function
  * clears all other fields of the iocb object when it is freed.
+ * The sglq structure that holds the xritag and the physical and virtual
+ * mappings for the scatter gather list is retrieved from the
+ * active array of sglq. The get of the sglq pointer also clears
+ * the entry in the array. If the status of the IO indicates that
+ * this IO was aborted then the sglq entry is put on the
+ * lpfc_abts_els_sgl_list until the CQ_ABORTED_XRI is received. If the
+ * IO has good status or fails for any other reason then the sglq
+ * entry is added to the free list (lpfc_sgl_list).
  **/
 static void
-__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+__lpfc_sli_release_iocbq_s4(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+       struct lpfc_sglq *sglq;
+       size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
+       unsigned long iflag;
+
+       if (iocbq->sli4_xritag == NO_XRI)
+               sglq = NULL;
+       else
+               sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
+       if (sglq)  {
+               if (iocbq->iocb_flag & LPFC_DRIVER_ABORTED
+                       || ((iocbq->iocb.ulpStatus == IOSTAT_LOCAL_REJECT)
+                       && (iocbq->iocb.un.ulpWord[4]
+                               == IOERR_SLI_ABORTED))) {
+                       spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
+                                       iflag);
+                       list_add(&sglq->list,
+                               &phba->sli4_hba.lpfc_abts_els_sgl_list);
+                       spin_unlock_irqrestore(
+                               &phba->sli4_hba.abts_sgl_list_lock, iflag);
+               } else
+                       list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+       }
+
+
+       /*
+        * Clean all volatile data fields, preserve iotag and node struct.
+        */
+       memset((char *)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+       iocbq->sli4_xritag = NO_XRI;
+       list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
+}
+
+/**
+ * __lpfc_sli_release_iocbq_s3 - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq_s3(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
 {
        size_t start_clean = offsetof(struct lpfc_iocbq, iocb);
 
@@ -160,9 +613,26 @@ __lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
         * Clean all volatile data fields, preserve iotag and node struct.
         */
        memset((char*)iocbq + start_clean, 0, sizeof(*iocbq) - start_clean);
+       iocbq->sli4_xritag = NO_XRI;
        list_add_tail(&iocbq->list, &phba->lpfc_iocb_list);
 }
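
The release routines above recycle an iocbq by zeroing everything from the iocb member onward while leaving the fields that precede it (such as the iotag) intact. A minimal sketch of that offsetof()/memset() idiom on a hypothetical struct:

#include <assert.h>
#include <stddef.h>
#include <string.h>

struct obj {
	int iotag;		/* preserved across reuse */
	int payload;		/* cleared */
	int status;		/* cleared */
};

int main(void)
{
	struct obj o = { .iotag = 42, .payload = 7, .status = -1 };
	size_t start_clean = offsetof(struct obj, payload);

	/* Zero every field from 'payload' onward; 'iotag' survives. */
	memset((char *)&o + start_clean, 0, sizeof(o) - start_clean);
	assert(o.iotag == 42 && o.payload == 0 && o.status == 0);
	return 0;
}
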
 
+/**
+ * __lpfc_sli_release_iocbq - Release iocb to the iocb pool
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to driver iocb object.
+ *
+ * This function is called with hbalock held to release driver
+ * iocb object to the iocb pool. The iotag in the iocb object
+ * does not change for each use of the iocb object. This function
+ * clears all other fields of the iocb object when it is freed.
+ **/
+static void
+__lpfc_sli_release_iocbq(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq)
+{
+       phba->__lpfc_sli_release_iocbq(phba, iocbq);
+}
+
 /**
  * lpfc_sli_release_iocbq - Release iocb to the iocb pool
  * @phba: Pointer to HBA context object.
@@ -281,6 +751,14 @@ lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
        case CMD_GEN_REQUEST64_CR:
        case CMD_GEN_REQUEST64_CX:
        case CMD_XMIT_ELS_RSP64_CX:
+       case DSSCMD_IWRITE64_CR:
+       case DSSCMD_IWRITE64_CX:
+       case DSSCMD_IREAD64_CR:
+       case DSSCMD_IREAD64_CX:
+       case DSSCMD_INVALIDATE_DEK:
+       case DSSCMD_SET_KEK:
+       case DSSCMD_GET_KEK_ID:
+       case DSSCMD_GEN_XFER:
                type = LPFC_SOL_IOCB;
                break;
        case CMD_ABORT_XRI_CN:
@@ -348,7 +826,7 @@ lpfc_sli_ring_map(struct lpfc_hba *phba)
        pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
        if (!pmb)
                return -ENOMEM;
-       pmbox = &pmb->mb;
+       pmbox = &pmb->u.mb;
        phba->link_state = LPFC_INIT_MBX_CMDS;
        for (i = 0; i < psli->num_rings; i++) {
                lpfc_config_ring(phba, i, pmb);
@@ -779,8 +1257,8 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
                phba->hbqs[i].buffer_count = 0;
        }
        /* Return all HBQ buffer that are in-fly */
-       list_for_each_entry_safe(dmabuf, next_dmabuf,
-                       &phba->hbqbuf_in_list, list) {
+       list_for_each_entry_safe(dmabuf, next_dmabuf, &phba->rb_pend_list,
+                                list) {
                hbq_buf = container_of(dmabuf, struct hbq_dmabuf, dbuf);
                list_del(&hbq_buf->dbuf.list);
                if (hbq_buf->tag == -1) {
@@ -814,9 +1292,27 @@ lpfc_sli_hbqbuf_free_all(struct lpfc_hba *phba)
  * pointer to the hbq entry if it successfully post the buffer
  * else it will return NULL.
  **/
-static struct lpfc_hbq_entry *
+static int
 lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
                         struct hbq_dmabuf *hbq_buf)
+{
+       return phba->lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buf);
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s3 - Post the hbq buffer to SLI3 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an hbq buffer to the
+ * firmware. If the function finds an empty slot in the HBQ, it will post the
+ * buffer and place it on the hbq_buffer_list. The function will return zero
+ * if it successfully posts the buffer, else it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s3(struct lpfc_hba *phba, uint32_t hbqno,
+                           struct hbq_dmabuf *hbq_buf)
 {
        struct lpfc_hbq_entry *hbqe;
        dma_addr_t physaddr = hbq_buf->dbuf.phys;
@@ -838,8 +1334,40 @@ lpfc_sli_hbq_to_firmware(struct lpfc_hba *phba, uint32_t hbqno,
                                /* flush */
                readl(phba->hbq_put + hbqno);
                list_add_tail(&hbq_buf->dbuf.list, &hbqp->hbq_buffer_list);
-       }
-       return hbqe;
+               return 0;
+       } else
+               return -ENOMEM;
+}
+
+/**
+ * lpfc_sli_hbq_to_firmware_s4 - Post the hbq buffer to SLI4 firmware
+ * @phba: Pointer to HBA context object.
+ * @hbqno: HBQ number.
+ * @hbq_buf: Pointer to HBQ buffer.
+ *
+ * This function is called with the hbalock held to post an RQE to the SLI4
+ * firmware. If able to post the RQE to the RQ it will queue the hbq entry to
+ * the hbq_buffer_list and return zero, otherwise it will return an error.
+ **/
+static int
+lpfc_sli_hbq_to_firmware_s4(struct lpfc_hba *phba, uint32_t hbqno,
+                           struct hbq_dmabuf *hbq_buf)
+{
+       int rc;
+       struct lpfc_rqe hrqe;
+       struct lpfc_rqe drqe;
+
+       hrqe.address_lo = putPaddrLow(hbq_buf->hbuf.phys);
+       hrqe.address_hi = putPaddrHigh(hbq_buf->hbuf.phys);
+       drqe.address_lo = putPaddrLow(hbq_buf->dbuf.phys);
+       drqe.address_hi = putPaddrHigh(hbq_buf->dbuf.phys);
+       rc = lpfc_sli4_rq_put(phba->sli4_hba.hdr_rq, phba->sli4_hba.dat_rq,
+                             &hrqe, &drqe);
+       if (rc < 0)
+               return rc;
+       hbq_buf->tag = rc;
+       list_add_tail(&hbq_buf->dbuf.list, &phba->hbqs[hbqno].hbq_buffer_list);
+       return 0;
 }
 
 /* HBQ for ELS and CT traffic. */
@@ -914,7 +1442,7 @@ lpfc_sli_hbqbuf_fill_hbqs(struct lpfc_hba *phba, uint32_t hbqno, uint32_t count)
                                 dbuf.list);
                hbq_buffer->tag = (phba->hbqs[hbqno].buffer_count |
                                      (hbqno << 16));
-               if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+               if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
                        phba->hbqs[hbqno].buffer_count++;
                        posted++;
                } else
@@ -964,6 +1492,25 @@ lpfc_sli_hbqbuf_init_hbqs(struct lpfc_hba *phba, uint32_t qno)
                                         lpfc_hbq_defs[qno]->init_count));
 }
 
+/**
+ * lpfc_sli_hbqbuf_get - Remove the first hbq buffer from an hbq list
+ * @rb_list: Pointer to the hbq buffer list to remove a buffer from.
+ *
+ * This function removes the first hbq buffer on an hbq list and returns a
+ * pointer to that buffer. If it finds no buffers on the list it returns NULL.
+ **/
+static struct hbq_dmabuf *
+lpfc_sli_hbqbuf_get(struct list_head *rb_list)
+{
+       struct lpfc_dmabuf *d_buf;
+
+       list_remove_head(rb_list, d_buf, struct lpfc_dmabuf, list);
+       if (!d_buf)
+               return NULL;
+       return container_of(d_buf, struct hbq_dmabuf, dbuf);
+}
+
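lpfc_sli_hbqbuf_get above recovers the enclosing hbq_dmabuf from the embedded lpfc_dmabuf's list linkage via container_of(). A stand-alone sketch of the idiom, with a simplified container_of() macro and hypothetical types:

#include <assert.h>
#include <stddef.h>

/* Simplified version of the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct dmabuf { void *virt; };

struct hbq_dmabuf {
	int tag;
	struct dmabuf dbuf;	/* this member sits on the hbq list */
};

int main(void)
{
	struct hbq_dmabuf hbq = { .tag = 9 };
	struct dmabuf *d = &hbq.dbuf;	/* what the list walk hands back */

	assert(container_of(d, struct hbq_dmabuf, dbuf)->tag == 9);
	return 0;
}
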
 /**
  * lpfc_sli_hbqbuf_find - Find the hbq buffer associated with a tag
  * @phba: Pointer to HBA context object.
@@ -985,12 +1532,15 @@ lpfc_sli_hbqbuf_find(struct lpfc_hba *phba, uint32_t tag)
        if (hbqno >= LPFC_MAX_HBQS)
                return NULL;
 
+       spin_lock_irq(&phba->hbalock);
        list_for_each_entry(d_buf, &phba->hbqs[hbqno].hbq_buffer_list, list) {
                hbq_buf = container_of(d_buf, struct hbq_dmabuf, dbuf);
                if (hbq_buf->tag == tag) {
+                       spin_unlock_irq(&phba->hbalock);
                        return hbq_buf;
                }
        }
+       spin_unlock_irq(&phba->hbalock);
        lpfc_printf_log(phba, KERN_ERR, LOG_SLI | LOG_VPORT,
                        "1803 Bad hbq tag. Data: x%x x%x\n",
                        tag, phba->hbqs[tag >> 16].buffer_count);
@@ -1013,9 +1563,8 @@ lpfc_sli_free_hbq(struct lpfc_hba *phba, struct hbq_dmabuf *hbq_buffer)
 
        if (hbq_buffer) {
                hbqno = hbq_buffer->tag >> 16;
-               if (!lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer)) {
+               if (lpfc_sli_hbq_to_firmware(phba, hbqno, hbq_buffer))
                        (phba->hbqs[hbqno].hbq_free_buffer)(phba, hbq_buffer);
-               }
        }
 }
 
@@ -1086,6 +1635,15 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
        case MBX_HEARTBEAT:
        case MBX_PORT_CAPABILITIES:
        case MBX_PORT_IOV_CONTROL:
+       case MBX_SLI4_CONFIG:
+       case MBX_SLI4_REQ_FTRS:
+       case MBX_REG_FCFI:
+       case MBX_UNREG_FCFI:
+       case MBX_REG_VFI:
+       case MBX_UNREG_VFI:
+       case MBX_INIT_VPI:
+       case MBX_INIT_VFI:
+       case MBX_RESUME_RPI:
                ret = mbxCommand;
                break;
        default:
@@ -1106,7 +1664,7 @@ lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
  * will wake up thread waiting on the wait queue pointed by context1
  * of the mailbox.
  **/
-static void
+void
 lpfc_sli_wake_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
        wait_queue_head_t *pdone_q;
@@ -1140,7 +1698,7 @@ void
 lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
        struct lpfc_dmabuf *mp;
-       uint16_t rpi;
+       uint16_t rpi, vpi;
        int rc;
 
        mp = (struct lpfc_dmabuf *) (pmb->context1);
@@ -1150,24 +1708,30 @@ lpfc_sli_def_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
                kfree(mp);
        }
 
+       if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
+           (phba->sli_rev == LPFC_SLI_REV4))
+               lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
+
        /*
         * If a REG_LOGIN succeeded  after node is destroyed or node
         * is in re-discovery driver need to cleanup the RPI.
         */
        if (!(phba->pport->load_flag & FC_UNLOADING) &&
-           pmb->mb.mbxCommand == MBX_REG_LOGIN64 &&
-           !pmb->mb.mbxStatus) {
-
-               rpi = pmb->mb.un.varWords[0];
-               lpfc_unreg_login(phba, pmb->mb.un.varRegLogin.vpi, rpi, pmb);
+           pmb->u.mb.mbxCommand == MBX_REG_LOGIN64 &&
+           !pmb->u.mb.mbxStatus) {
+               rpi = pmb->u.mb.un.varWords[0];
+               vpi = pmb->u.mb.un.varRegLogin.vpi - phba->vpi_base;
+               lpfc_unreg_login(phba, vpi, rpi, pmb);
                pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
                if (rc != MBX_NOT_FINISHED)
                        return;
        }
 
-       mempool_free(pmb, phba->mbox_mem_pool);
-       return;
+       if (bf_get(lpfc_mqe_command, &pmb->u.mqe) == MBX_SLI4_CONFIG)
+               lpfc_sli4_mbox_cmd_free(phba, pmb);
+       else
+               mempool_free(pmb, phba->mbox_mem_pool);
 }
 
 /**
@@ -1204,7 +1768,7 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                if (pmb == NULL)
                        break;
 
-               pmbox = &pmb->mb;
+               pmbox = &pmb->u.mb;
 
                if (pmbox->mbxCommand != MBX_HEARTBEAT) {
                        if (pmb->vport) {
@@ -1233,9 +1797,10 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                        /* Unknow mailbox command compl */
                        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                                        "(%d):0323 Unknown Mailbox command "
-                                       "%x Cmpl\n",
+                                       "x%x (x%x) Cmpl\n",
                                        pmb->vport ? pmb->vport->vpi : 0,
-                                       pmbox->mbxCommand);
+                                       pmbox->mbxCommand,
+                                       lpfc_sli4_mbox_opcode_get(phba, pmb));
                        phba->link_state = LPFC_HBA_ERROR;
                        phba->work_hs = HS_FFER3;
                        lpfc_handle_eratt(phba);
@@ -1250,29 +1815,29 @@ lpfc_sli_handle_mb_event(struct lpfc_hba *phba)
                                                LOG_MBOX | LOG_SLI,
                                                "(%d):0305 Mbox cmd cmpl "
                                                "error - RETRYing Data: x%x "
-                                               "x%x x%x x%x\n",
+                                               "(x%x) x%x x%x x%x\n",
                                                pmb->vport ? pmb->vport->vpi :0,
                                                pmbox->mbxCommand,
+                                               lpfc_sli4_mbox_opcode_get(phba,
+                                                                         pmb),
                                                pmbox->mbxStatus,
                                                pmbox->un.varWords[0],
                                                pmb->vport->port_state);
                                pmbox->mbxStatus = 0;
                                pmbox->mbxOwner = OWN_HOST;
-                               spin_lock_irq(&phba->hbalock);
-                               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-                               spin_unlock_irq(&phba->hbalock);
                                rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-                               if (rc == MBX_SUCCESS)
+                               if (rc != MBX_NOT_FINISHED)
                                        continue;
                        }
                }
 
                /* Mailbox cmd <cmd> Cmpl <cmpl> */
                lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
-                               "(%d):0307 Mailbox cmd x%x Cmpl x%p "
+                               "(%d):0307 Mailbox cmd x%x (x%x) Cmpl x%p "
                                "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
                                pmb->vport ? pmb->vport->vpi : 0,
                                pmbox->mbxCommand,
+                               lpfc_sli4_mbox_opcode_get(phba, pmb),
                                pmb->mbox_cmpl,
                                *((uint32_t *) pmbox),
                                pmbox->un.varWords[0],
@@ -1317,6 +1882,45 @@ lpfc_sli_get_buff(struct lpfc_hba *phba,
        return &hbq_entry->dbuf;
 }
 
+/**
+ * lpfc_complete_unsol_iocb - Complete an unsolicited sequence
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @saveq: Pointer to the iocbq struct representing the sequence starting frame.
+ * @fch_r_ctl: the r_ctl for the first frame of the sequence.
+ * @fch_type: the type for the first frame of the sequence.
+ *
+ * This function is called with no lock held. This function uses the r_ctl and
+ * type of the received sequence to find the correct callback function to call
+ * to process the sequence.
+ **/
+static int
+lpfc_complete_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                        struct lpfc_iocbq *saveq, uint32_t fch_r_ctl,
+                        uint32_t fch_type)
+{
+       int i;
+
+       /* unSolicited Responses */
+       if (pring->prt[0].profile) {
+               if (pring->prt[0].lpfc_sli_rcv_unsol_event)
+                       (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
+                                                                       saveq);
+               return 1;
+       }
+       /* We must search, based on rctl / type
+          for the right routine */
+       for (i = 0; i < pring->num_mask; i++) {
+               if ((pring->prt[i].rctl == fch_r_ctl) &&
+                   (pring->prt[i].type == fch_type)) {
+                       if (pring->prt[i].lpfc_sli_rcv_unsol_event)
+                               (pring->prt[i].lpfc_sli_rcv_unsol_event)
+                                               (phba, pring, saveq);
+                       return 1;
+               }
+       }
+       return 0;
+}
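
The search loop above keys the unsolicited handler off the (rctl, type) pair of the sequence's first frame. A reduced sketch of that table dispatch with a hypothetical handler entry:

#include <stdio.h>

struct prt {
	unsigned rctl, type;
	void (*handler)(void);
};

static void els_handler(void) { puts("ELS sequence"); }

/* Return 1 if a (rctl, type) match was found and handled, else 0. */
static int dispatch(const struct prt *prt, int num_mask,
		    unsigned rctl, unsigned type)
{
	int i;

	for (i = 0; i < num_mask; i++) {
		if (prt[i].rctl == rctl && prt[i].type == type) {
			if (prt[i].handler)
				prt[i].handler();
			return 1;
		}
	}
	return 0;		/* caller logs "unexpected Rctl/Type" */
}

int main(void)
{
	struct prt table[] = { { 0x22, 0x01, els_handler } };

	return !dispatch(table, 1, 0x22, 0x01);
}
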
 
 /**
  * lpfc_sli_process_unsol_iocb - Unsolicited iocb handler
@@ -1339,7 +1943,7 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        IOCB_t           * irsp;
        WORD5            * w5p;
        uint32_t           Rctl, Type;
-       uint32_t           match, i;
+       uint32_t           match;
        struct lpfc_iocbq *iocbq;
        struct lpfc_dmabuf *dmzbuf;
 
@@ -1482,35 +2086,12 @@ lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                }
        }
 
-       /* unSolicited Responses */
-       if (pring->prt[0].profile) {
-               if (pring->prt[0].lpfc_sli_rcv_unsol_event)
-                       (pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring,
-                                                                       saveq);
-               match = 1;
-       } else {
-               /* We must search, based on rctl / type
-                  for the right routine */
-               for (i = 0; i < pring->num_mask; i++) {
-                       if ((pring->prt[i].rctl == Rctl)
-                           && (pring->prt[i].type == Type)) {
-                               if (pring->prt[i].lpfc_sli_rcv_unsol_event)
-                                       (pring->prt[i].lpfc_sli_rcv_unsol_event)
-                                                       (phba, pring, saveq);
-                               match = 1;
-                               break;
-                       }
-               }
-       }
-       if (match == 0) {
-               /* Unexpected Rctl / Type received */
-               /* Ring <ringno> handler: unexpected
-                  Rctl <Rctl> Type <Type> received */
+       if (!lpfc_complete_unsol_iocb(phba, pring, saveq, Rctl, Type))
                lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
                                "0313 Ring %d handler: unexpected Rctl x%x "
                                "Type x%x received\n",
                                pring->ringno, Rctl, Type);
-       }
+
        return 1;
 }
 
@@ -1551,6 +2132,37 @@ lpfc_sli_iocbq_lookup(struct lpfc_hba *phba,
        return NULL;
 }
 
+/**
+ * lpfc_sli_iocbq_lookup_by_tag - Find command iocb for the iotag
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @iotag: IOCB tag.
+ *
+ * This function looks up the iocb_lookup table to get the command iocb
+ * corresponding to the given iotag. This function is called with the
+ * hbalock held.
+ * This function returns the command iocb object if it finds the command
+ * iocb else returns NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_iocbq_lookup_by_tag(struct lpfc_hba *phba,
+                            struct lpfc_sli_ring *pring, uint16_t iotag)
+{
+       struct lpfc_iocbq *cmd_iocb;
+
+       if (iotag != 0 && iotag <= phba->sli.last_iotag) {
+               cmd_iocb = phba->sli.iocbq_lookup[iotag];
+               list_del_init(&cmd_iocb->list);
+               pring->txcmplq_cnt--;
+               return cmd_iocb;
+       }
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "0372 iotag x%x is out of range: max iotag (x%x)\n",
+                       iotag, phba->sli.last_iotag);
+       return NULL;
+}
+
 /**
  * lpfc_sli_process_sol_iocb - process solicited iocb completion
  * @phba: Pointer to HBA context object.
@@ -1954,7 +2566,7 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
                        if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
                                (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
                                spin_unlock_irqrestore(&phba->hbalock, iflag);
-                               lpfc_rampdown_queue_depth(phba);
+                               phba->lpfc_rampdown_queue_depth(phba);
                                spin_lock_irqsave(&phba->hbalock, iflag);
                        }
 
@@ -2068,39 +2680,215 @@ lpfc_sli_handle_fast_ring_event(struct lpfc_hba *phba,
 }
 
 /**
- * lpfc_sli_handle_slow_ring_event - Handle ring events for non-FCP rings
+ * lpfc_sli_sp_handle_rspiocb - Handle slow-path response iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @rspiocbp: Pointer to driver response IOCB object.
+ *
+ * This function is called from the worker thread when there is a slow-path
+ * response IOCB to process. This function chains all the response iocbs until
+ * seeing the iocb with the LE bit set. The function will call
+ * lpfc_sli_process_sol_iocb function if the response iocb indicates a
+ * completion of a command iocb. The function will call the
+ * lpfc_sli_process_unsol_iocb function if this is an unsolicited iocb.
+ * The function frees the resources or calls the completion handler if this
+ * iocb is an abort completion. The function returns NULL when the response
+ * iocb has the LE bit set and all the chained iocbs are processed, otherwise
+ * this function shall chain the iocb on to the iocb_continueq and return the
+ * response iocb passed in.
+ **/
+static struct lpfc_iocbq *
+lpfc_sli_sp_handle_rspiocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+                       struct lpfc_iocbq *rspiocbp)
+{
+       struct lpfc_iocbq *saveq;
+       struct lpfc_iocbq *cmdiocbp;
+       struct lpfc_iocbq *next_iocb;
+       IOCB_t *irsp = NULL;
+       uint32_t free_saveq;
+       uint8_t iocb_cmd_type;
+       lpfc_iocb_type type;
+       unsigned long iflag;
+       int rc;
+
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       /* First add the response iocb to the continueq list */
+       list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
+       pring->iocb_continueq_cnt++;
+
+       /* Now, determine whether the list is completed for processing */
+       irsp = &rspiocbp->iocb;
+       if (irsp->ulpLe) {
+               /*
+                * By default, the driver expects to free all resources
+                * associated with this iocb completion.
+                */
+               free_saveq = 1;
+               saveq = list_get_first(&pring->iocb_continueq,
+                                      struct lpfc_iocbq, list);
+               irsp = &(saveq->iocb);
+               list_del_init(&pring->iocb_continueq);
+               pring->iocb_continueq_cnt = 0;
+
+               pring->stats.iocb_rsp++;
+
+               /*
+                * If resource errors reported from HBA, reduce
+                * queuedepths of the SCSI device.
+                */
+               if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
+                   (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
+                       phba->lpfc_rampdown_queue_depth(phba);
+                       spin_lock_irqsave(&phba->hbalock, iflag);
+               }
+
+               if (irsp->ulpStatus) {
+                       /* Rsp ring <ringno> error: IOCB */
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "0328 Rsp Ring %d error: "
+                                       "IOCB Data: "
+                                       "x%x x%x x%x x%x "
+                                       "x%x x%x x%x x%x "
+                                       "x%x x%x x%x x%x "
+                                       "x%x x%x x%x x%x\n",
+                                       pring->ringno,
+                                       irsp->un.ulpWord[0],
+                                       irsp->un.ulpWord[1],
+                                       irsp->un.ulpWord[2],
+                                       irsp->un.ulpWord[3],
+                                       irsp->un.ulpWord[4],
+                                       irsp->un.ulpWord[5],
+                                       *(((uint32_t *) irsp) + 6),
+                                       *(((uint32_t *) irsp) + 7),
+                                       *(((uint32_t *) irsp) + 8),
+                                       *(((uint32_t *) irsp) + 9),
+                                       *(((uint32_t *) irsp) + 10),
+                                       *(((uint32_t *) irsp) + 11),
+                                       *(((uint32_t *) irsp) + 12),
+                                       *(((uint32_t *) irsp) + 13),
+                                       *(((uint32_t *) irsp) + 14),
+                                       *(((uint32_t *) irsp) + 15));
+               }
+
+               /*
+                * Fetch the IOCB command type and call the correct completion
+                * routine. Solicited and Unsolicited IOCBs on the ELS ring
+                * get freed back to the lpfc_iocb_list by the discovery
+                * kernel thread.
+                */
+               iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
+               type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
+               switch (type) {
+               case LPFC_SOL_IOCB:
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
+                       rc = lpfc_sli_process_sol_iocb(phba, pring, saveq);
+                       spin_lock_irqsave(&phba->hbalock, iflag);
+                       break;
+
+               case LPFC_UNSOL_IOCB:
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
+                       rc = lpfc_sli_process_unsol_iocb(phba, pring, saveq);
+                       spin_lock_irqsave(&phba->hbalock, iflag);
+                       if (!rc)
+                               free_saveq = 0;
+                       break;
+
+               case LPFC_ABORT_IOCB:
+                       cmdiocbp = NULL;
+                       if (irsp->ulpCommand != CMD_XRI_ABORTED_CX)
+                               cmdiocbp = lpfc_sli_iocbq_lookup(phba, pring,
+                                                                saveq);
+                       if (cmdiocbp) {
+                               /* Call the specified completion routine */
+                               if (cmdiocbp->iocb_cmpl) {
+                                       spin_unlock_irqrestore(&phba->hbalock,
+                                                              iflag);
+                                       (cmdiocbp->iocb_cmpl)(phba, cmdiocbp,
+                                                             saveq);
+                                       spin_lock_irqsave(&phba->hbalock,
+                                                         iflag);
+                               } else
+                                       __lpfc_sli_release_iocbq(phba,
+                                                                cmdiocbp);
+                       }
+                       break;
+
+               case LPFC_UNKNOWN_IOCB:
+                       if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
+                               char adaptermsg[LPFC_MAX_ADPTMSG];
+                               memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
+                               memcpy(&adaptermsg[0], (uint8_t *)irsp,
+                                      MAX_MSG_DATA);
+                               dev_warn(&((phba->pcidev)->dev),
+                                        "lpfc%d: %s\n",
+                                        phba->brd_no, adaptermsg);
+                       } else {
+                               /* Unknown IOCB command */
+                               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                               "0335 Unknown IOCB "
+                                               "command Data: x%x "
+                                               "x%x x%x x%x\n",
+                                               irsp->ulpCommand,
+                                               irsp->ulpStatus,
+                                               irsp->ulpIoTag,
+                                               irsp->ulpContext);
+                       }
+                       break;
+               }
+
+               if (free_saveq) {
+                       list_for_each_entry_safe(rspiocbp, next_iocb,
+                                                &saveq->list, list) {
+                               list_del(&rspiocbp->list);
+                               __lpfc_sli_release_iocbq(phba, rspiocbp);
+                       }
+                       __lpfc_sli_release_iocbq(phba, saveq);
+               }
+               rspiocbp = NULL;
+       }
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+       return rspiocbp;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event - Wrapper func for handling slow-path iocbs
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
  * @mask: Host attention register mask for this ring.
  *
- * This function is called from the worker thread when there is a ring
- * event for non-fcp rings. The caller does not hold any lock .
- * The function processes each response iocb in the response ring until it
- * finds an iocb with LE bit set and chains all the iocbs upto the iocb with
- * LE bit set. The function will call lpfc_sli_process_sol_iocb function if the
- * response iocb indicates a completion of a command iocb. The function
- * will call lpfc_sli_process_unsol_iocb function if this is an unsolicited
- * iocb. The function frees the resources or calls the completion handler if
- * this iocb is an abort completion. The function returns 0 when the allocated
- * iocbs are not freed, otherwise returns 1.
+ * This routine wraps the actual slow_ring event process routine from the
+ * API jump table function pointer from the lpfc_hba struct.
  **/
-int
+void
 lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
                                struct lpfc_sli_ring *pring, uint32_t mask)
+{
+       phba->lpfc_sli_handle_slow_ring_event(phba, pring, mask);
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s3 - Handle SLI3 ring event for non-FCP rings
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a ring event
+ * for non-fcp rings. The caller does not hold any lock. The function will
+ * remove each response iocb in the response ring and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s3(struct lpfc_hba *phba,
+                                  struct lpfc_sli_ring *pring, uint32_t mask)
 {
        struct lpfc_pgp *pgp;
        IOCB_t *entry;
        IOCB_t *irsp = NULL;
        struct lpfc_iocbq *rspiocbp = NULL;
-       struct lpfc_iocbq *next_iocb;
-       struct lpfc_iocbq *cmdiocbp;
-       struct lpfc_iocbq *saveq;
-       uint8_t iocb_cmd_type;
-       lpfc_iocb_type type;
-       uint32_t status, free_saveq;
        uint32_t portRspPut, portRspMax;
-       int rc = 1;
        unsigned long iflag;
+       uint32_t status;
 
        pgp = &phba->port_gp[pring->ringno];
        spin_lock_irqsave(&phba->hbalock, iflag);
@@ -2128,7 +2916,7 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
                phba->work_hs = HS_FFER3;
                lpfc_handle_eratt(phba);
 
-               return 1;
+               return;
        }
 
        rmb();
@@ -2173,138 +2961,10 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
 
                writel(pring->rspidx, &phba->host_gp[pring->ringno].rspGetInx);
 
-               list_add_tail(&rspiocbp->list, &(pring->iocb_continueq));
-
-               pring->iocb_continueq_cnt++;
-               if (irsp->ulpLe) {
-                       /*
-                        * By default, the driver expects to free all resources
-                        * associated with this iocb completion.
-                        */
-                       free_saveq = 1;
-                       saveq = list_get_first(&pring->iocb_continueq,
-                                              struct lpfc_iocbq, list);
-                       irsp = &(saveq->iocb);
-                       list_del_init(&pring->iocb_continueq);
-                       pring->iocb_continueq_cnt = 0;
-
-                       pring->stats.iocb_rsp++;
-
-                       /*
-                        * If resource errors reported from HBA, reduce
-                        * queuedepths of the SCSI device.
-                        */
-                       if ((irsp->ulpStatus == IOSTAT_LOCAL_REJECT) &&
-                            (irsp->un.ulpWord[4] == IOERR_NO_RESOURCES)) {
-                               spin_unlock_irqrestore(&phba->hbalock, iflag);
-                               lpfc_rampdown_queue_depth(phba);
-                               spin_lock_irqsave(&phba->hbalock, iflag);
-                       }
-
-                       if (irsp->ulpStatus) {
-                               /* Rsp ring <ringno> error: IOCB */
-                               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
-                                               "0328 Rsp Ring %d error: "
-                                               "IOCB Data: "
-                                               "x%x x%x x%x x%x "
-                                               "x%x x%x x%x x%x "
-                                               "x%x x%x x%x x%x "
-                                               "x%x x%x x%x x%x\n",
-                                               pring->ringno,
-                                               irsp->un.ulpWord[0],
-                                               irsp->un.ulpWord[1],
-                                               irsp->un.ulpWord[2],
-                                               irsp->un.ulpWord[3],
-                                               irsp->un.ulpWord[4],
-                                               irsp->un.ulpWord[5],
-                                               *(((uint32_t *) irsp) + 6),
-                                               *(((uint32_t *) irsp) + 7),
-                                               *(((uint32_t *) irsp) + 8),
-                                               *(((uint32_t *) irsp) + 9),
-                                               *(((uint32_t *) irsp) + 10),
-                                               *(((uint32_t *) irsp) + 11),
-                                               *(((uint32_t *) irsp) + 12),
-                                               *(((uint32_t *) irsp) + 13),
-                                               *(((uint32_t *) irsp) + 14),
-                                               *(((uint32_t *) irsp) + 15));
-                       }
-
-                       /*
-                        * Fetch the IOCB command type and call the correct
-                        * completion routine.  Solicited and Unsolicited
-                        * IOCBs on the ELS ring get freed back to the
-                        * lpfc_iocb_list by the discovery kernel thread.
-                        */
-                       iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
-                       type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
-                       if (type == LPFC_SOL_IOCB) {
-                               spin_unlock_irqrestore(&phba->hbalock, iflag);
-                               rc = lpfc_sli_process_sol_iocb(phba, pring,
-                                                              saveq);
-                               spin_lock_irqsave(&phba->hbalock, iflag);
-                       } else if (type == LPFC_UNSOL_IOCB) {
-                               spin_unlock_irqrestore(&phba->hbalock, iflag);
-                               rc = lpfc_sli_process_unsol_iocb(phba, pring,
-                                                                saveq);
-                               spin_lock_irqsave(&phba->hbalock, iflag);
-                               if (!rc)
-                                       free_saveq = 0;
-                       } else if (type == LPFC_ABORT_IOCB) {
-                               if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
-                                   ((cmdiocbp =
-                                     lpfc_sli_iocbq_lookup(phba, pring,
-                                                           saveq)))) {
-                                       /* Call the specified completion
-                                          routine */
-                                       if (cmdiocbp->iocb_cmpl) {
-                                               spin_unlock_irqrestore(
-                                                      &phba->hbalock,
-                                                      iflag);
-                                               (cmdiocbp->iocb_cmpl) (phba,
-                                                            cmdiocbp, saveq);
-                                               spin_lock_irqsave(
-                                                         &phba->hbalock,
-                                                         iflag);
-                                       } else
-                                               __lpfc_sli_release_iocbq(phba,
-                                                                     cmdiocbp);
-                               }
-                       } else if (type == LPFC_UNKNOWN_IOCB) {
-                               if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
-
-                                       char adaptermsg[LPFC_MAX_ADPTMSG];
-
-                                       memset(adaptermsg, 0,
-                                              LPFC_MAX_ADPTMSG);
-                                       memcpy(&adaptermsg[0], (uint8_t *) irsp,
-                                              MAX_MSG_DATA);
-                                       dev_warn(&((phba->pcidev)->dev),
-                                                "lpfc%d: %s\n",
-                                                phba->brd_no, adaptermsg);
-                               } else {
-                                       /* Unknown IOCB command */
-                                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-                                                       "0335 Unknown IOCB "
-                                                       "command Data: x%x "
-                                                       "x%x x%x x%x\n",
-                                                       irsp->ulpCommand,
-                                                       irsp->ulpStatus,
-                                                       irsp->ulpIoTag,
-                                                       irsp->ulpContext);
-                               }
-                       }
-
-                       if (free_saveq) {
-                               list_for_each_entry_safe(rspiocbp, next_iocb,
-                                                        &saveq->list, list) {
-                                       list_del(&rspiocbp->list);
-                                       __lpfc_sli_release_iocbq(phba,
-                                                                rspiocbp);
-                               }
-                               __lpfc_sli_release_iocbq(phba, saveq);
-                       }
-                       rspiocbp = NULL;
-               }
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               /* Handle the response IOCB */
+               rspiocbp = lpfc_sli_sp_handle_rspiocb(phba, pring, rspiocbp);
+               spin_lock_irqsave(&phba->hbalock, iflag);
 
                /*
                 * If the port response put pointer has not been updated, sync
@@ -2338,7 +2998,37 @@ lpfc_sli_handle_slow_ring_event(struct lpfc_hba *phba,
        }
 
        spin_unlock_irqrestore(&phba->hbalock, iflag);
-       return rc;
+       return;
+}
+
+/**
+ * lpfc_sli_handle_slow_ring_event_s4 - Handle SLI4 slow-path els events
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @mask: Host attention register mask for this ring.
+ *
+ * This function is called from the worker thread when there is a pending
+ * ELS response iocb on the driver internal slow-path response iocb worker
+ * queue. The caller does not hold any lock. The function removes each
+ * response iocb from the response worker queue and calls the handle
+ * response iocb routine (lpfc_sli_sp_handle_rspiocb) to process it.
+ **/
+static void
+lpfc_sli_handle_slow_ring_event_s4(struct lpfc_hba *phba,
+                                  struct lpfc_sli_ring *pring, uint32_t mask)
+{
+       struct lpfc_iocbq *irspiocbq;
+       unsigned long iflag;
+
+       while (!list_empty(&phba->sli4_hba.sp_rspiocb_work_queue)) {
+               /* Get the response iocb from the head of work queue */
+               spin_lock_irqsave(&phba->hbalock, iflag);
+               list_remove_head(&phba->sli4_hba.sp_rspiocb_work_queue,
+                                irspiocbq, struct lpfc_iocbq, list);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               /* Process the response iocb */
+               lpfc_sli_sp_handle_rspiocb(phba, pring, irspiocbq);
+       }
 }
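The _s4 variant above drains a driver-internal list rather than a hardware ring, so the producer side defines the locking contract: whoever enqueues must hold hbalock around the list manipulation. A hedged sketch of that assumed producer, as the SLI4 interrupt path would queue a slow-path ELS response for the worker thread (the wake-up via lpfc_worker_wake_up at this exact point is an assumption):

	/* Assumed producer side: queue one slow-path response iocb for
	 * lpfc_sli_handle_slow_ring_event_s4() to process later. */
	spin_lock_irqsave(&phba->hbalock, iflag);
	list_add_tail(&irspiocbq->list,
		      &phba->sli4_hba.sp_rspiocb_work_queue);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
	lpfc_worker_wake_up(phba);	/* assumed worker wake-up */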
 
 /**
@@ -2420,7 +3110,7 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_brdready - Check for host status bits
+ * lpfc_sli_brdready_s3 - Check for sli3 host ready status
  * @phba: Pointer to HBA context object.
  * @mask: Bit mask to be checked.
  *
@@ -2432,8 +3122,8 @@ lpfc_sli_flush_fcp_rings(struct lpfc_hba *phba)
  * function returns 1 when HBA fail to restart otherwise returns
  * zero.
  **/
-int
-lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+static int
+lpfc_sli_brdready_s3(struct lpfc_hba *phba, uint32_t mask)
 {
        uint32_t status;
        int i = 0;
@@ -2477,6 +3167,56 @@ lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
        return retval;
 }
 
+/**
+ * lpfc_sli_brdready_s4 - Check for sli4 host ready status
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This function checks the host status register to see if the HBA is
+ * ready. This function will wait in a loop for the HBA to become ready.
+ * If the HBA is not ready, the function will reset the HBA PCI
+ * function again. The function returns 1 when the HBA fails to become
+ * ready, otherwise it returns zero.
+ **/
+static int
+lpfc_sli_brdready_s4(struct lpfc_hba *phba, uint32_t mask)
+{
+       uint32_t status;
+       int retval = 0;
+
+       /* Read the HBA Host Status Register */
+       status = lpfc_sli4_post_status_check(phba);
+
+       if (status) {
+               phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+               lpfc_sli_brdrestart(phba);
+               status = lpfc_sli4_post_status_check(phba);
+       }
+
+       /* Check to see if any errors occurred during init */
+       if (status) {
+               phba->link_state = LPFC_HBA_ERROR;
+               retval = 1;
+       } else
+               phba->sli4_hba.intr_enable = 0;
+
+       return retval;
+}
+
+/**
+ * lpfc_sli_brdready - Wrapper func for checking the HBA readiness
+ * @phba: Pointer to HBA context object.
+ * @mask: Bit mask to be checked.
+ *
+ * This routine wraps the actual SLI3 or SLI4 HBA readiness check routine,
+ * invoking it through the API jump table function pointer in the lpfc_hba
+ * struct.
+ **/
+int
+lpfc_sli_brdready(struct lpfc_hba *phba, uint32_t mask)
+{
+       return phba->lpfc_sli_brdready(phba, mask);
+}
+
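Callers stay revision-agnostic: they keep calling lpfc_sli_brdready() with a host-status mask and let the jump table select the _s3 or _s4 check. A hedged usage sketch, assuming the mask bits (HS_FFRDY | HS_MBRDY) that the existing SLI3 init path passes:

	/* Restart the board and wait for it to report ready; the mask
	 * choice follows the existing SLI3 usage and is an assumption. */
	lpfc_sli_brdrestart(phba);
	if (lpfc_sli_brdready(phba, HS_FFRDY | HS_MBRDY)) {
		/* board failed to come ready */
		phba->link_state = LPFC_HBA_ERROR;
		return -EIO;
	}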
 #define BARRIER_TEST_PATTERN (0xdeadbeef)
 
 /**
@@ -2532,7 +3272,7 @@ void lpfc_reset_barrier(struct lpfc_hba *phba)
                mdelay(1);
 
        if (readl(resp_buf + 1) != ~(BARRIER_TEST_PATTERN)) {
-               if (phba->sli.sli_flag & LPFC_SLI2_ACTIVE ||
+               if (phba->sli.sli_flag & LPFC_SLI_ACTIVE ||
                    phba->pport->stopped)
                        goto restore_hc;
                else
@@ -2613,7 +3353,9 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
                return 1;
        }
 
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+       spin_lock_irq(&phba->hbalock);
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
+       spin_unlock_irq(&phba->hbalock);
 
        mempool_free(pmb, phba->mbox_mem_pool);
 
@@ -2636,10 +3378,10 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
        }
        spin_lock_irq(&phba->hbalock);
        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       psli->mbox_active = NULL;
        phba->link_flag &= ~LS_IGNORE_ERATT;
        spin_unlock_irq(&phba->hbalock);
 
-       psli->mbox_active = NULL;
        lpfc_hba_down_post(phba);
        phba->link_state = LPFC_HBA_ERROR;
 
@@ -2647,7 +3389,7 @@ lpfc_sli_brdkill(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_brdreset - Reset the HBA
+ * lpfc_sli_brdreset - Reset a sli-2 or sli-3 HBA
  * @phba: Pointer to HBA context object.
  *
  * This function resets the HBA by writing HC_INITFF to the control
@@ -2683,7 +3425,8 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
                              (cfg_value &
                               ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
 
-       psli->sli_flag &= ~(LPFC_SLI2_ACTIVE | LPFC_PROCESS_LA);
+       psli->sli_flag &= ~(LPFC_SLI_ACTIVE | LPFC_PROCESS_LA);
+
        /* Now toggle INITFF bit in the Host Control Register */
        writel(HC_INITFF, phba->HCregaddr);
        mdelay(1);
@@ -2710,27 +3453,86 @@ lpfc_sli_brdreset(struct lpfc_hba *phba)
 }
 
 /**
- * lpfc_sli_brdrestart - Restart the HBA
+ * lpfc_sli4_brdreset - Reset a sli-4 HBA
  * @phba: Pointer to HBA context object.
  *
- * This function is called in the SLI initialization code path to
- * restart the HBA. The caller is not required to hold any lock.
- * This function writes MBX_RESTART mailbox command to the SLIM and
- * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
- * function to free any pending commands. The function enables
- * POST only during the first initialization. The function returns zero.
- * The function does not guarantee completion of MBX_RESTART mailbox
- * command before the return of this function.
+ * This function resets a SLI4 HBA. It disables PCI-layer parity
+ * checking while it resets the device. The caller is not required to
+ * hold any locks.
+ *
+ * This function always returns 0.
  **/
 int
-lpfc_sli_brdrestart(struct lpfc_hba *phba)
+lpfc_sli4_brdreset(struct lpfc_hba *phba)
 {
-       MAILBOX_t *mb;
-       struct lpfc_sli *psli;
-       volatile uint32_t word0;
-       void __iomem *to_slim;
-
-       spin_lock_irq(&phba->hbalock);
+       struct lpfc_sli *psli = &phba->sli;
+       uint16_t cfg_value;
+       uint8_t qindx;
+
+       /* Reset HBA */
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "0295 Reset HBA Data: x%x x%x\n",
+                       phba->pport->port_state, psli->sli_flag);
+
+       /* perform board reset */
+       phba->fc_eventTag = 0;
+       phba->pport->fc_myDID = 0;
+       phba->pport->fc_prevDID = 0;
+
+       /* Turn off parity checking and serr during the physical reset */
+       pci_read_config_word(phba->pcidev, PCI_COMMAND, &cfg_value);
+       pci_write_config_word(phba->pcidev, PCI_COMMAND,
+                             (cfg_value &
+                             ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));
+
+       spin_lock_irq(&phba->hbalock);
+       psli->sli_flag &= ~(LPFC_PROCESS_LA);
+       phba->fcf.fcf_flag = 0;
+       /* Clean up the child queue list for the CQs */
+       list_del_init(&phba->sli4_hba.mbx_wq->list);
+       list_del_init(&phba->sli4_hba.els_wq->list);
+       list_del_init(&phba->sli4_hba.hdr_rq->list);
+       list_del_init(&phba->sli4_hba.dat_rq->list);
+       list_del_init(&phba->sli4_hba.mbx_cq->list);
+       list_del_init(&phba->sli4_hba.els_cq->list);
+       list_del_init(&phba->sli4_hba.rxq_cq->list);
+       for (qindx = 0; qindx < phba->cfg_fcp_wq_count; qindx++)
+               list_del_init(&phba->sli4_hba.fcp_wq[qindx]->list);
+       for (qindx = 0; qindx < phba->cfg_fcp_eq_count; qindx++)
+               list_del_init(&phba->sli4_hba.fcp_cq[qindx]->list);
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Now physically reset the device */
+       lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+                       "0389 Performing PCI function reset!\n");
+       /* Perform FCoE PCI function reset */
+       lpfc_pci_function_reset(phba);
+
+       return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart_s3 - Restart a sli-3 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to
+ * restart the HBA. The caller is not required to hold any lock.
+ * This function writes MBX_RESTART mailbox command to the SLIM and
+ * resets the HBA. At the end of the function, it calls lpfc_hba_down_post
+ * function to free any pending commands. The function enables
+ * POST only during the first initialization. The function returns zero.
+ * The function does not guarantee completion of MBX_RESTART mailbox
+ * command before the return of this function.
+ **/
+static int
+lpfc_sli_brdrestart_s3(struct lpfc_hba *phba)
+{
+       MAILBOX_t *mb;
+       struct lpfc_sli *psli;
+       volatile uint32_t word0;
+       void __iomem *to_slim;
+
+       spin_lock_irq(&phba->hbalock);
 
        psli = &phba->sli;
 
@@ -2762,7 +3564,7 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
        lpfc_sli_brdreset(phba);
        phba->pport->stopped = 0;
        phba->link_state = LPFC_INIT_START;
-
+       phba->hba_flag = 0;
        spin_unlock_irq(&phba->hbalock);
 
        memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
@@ -2776,6 +3578,55 @@ lpfc_sli_brdrestart(struct lpfc_hba *phba)
        return 0;
 }
 
+/**
+ * lpfc_sli_brdrestart_s4 - Restart the sli-4 hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI initialization code path to restart
+ * a SLI4 HBA. The caller is not required to hold any lock.
+ * At the end of the function, it calls lpfc_hba_down_post function to
+ * free any pending commands.
+ **/
+static int
+lpfc_sli_brdrestart_s4(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+
+       /* Restart HBA */
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "0296 Restart HBA Data: x%x x%x\n",
+                       phba->pport->port_state, psli->sli_flag);
+
+       lpfc_sli4_brdreset(phba);
+
+       spin_lock_irq(&phba->hbalock);
+       phba->pport->stopped = 0;
+       phba->link_state = LPFC_INIT_START;
+       phba->hba_flag = 0;
+       spin_unlock_irq(&phba->hbalock);
+
+       memset(&psli->lnk_stat_offsets, 0, sizeof(psli->lnk_stat_offsets));
+       psli->stats_start = get_seconds();
+
+       lpfc_hba_down_post(phba);
+
+       return 0;
+}
+
+/**
+ * lpfc_sli_brdrestart - Wrapper func for restarting hba
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine wraps the actual SLI3 or SLI4 HBA restart routine,
+ * invoking it through the API jump table function pointer in the
+ * lpfc_hba struct.
+ **/
+int
+lpfc_sli_brdrestart(struct lpfc_hba *phba)
+{
+       return phba->lpfc_sli_brdrestart(phba);
+}
+
 /**
  * lpfc_sli_chipset_init - Wait for the restart of the HBA after a restart
  * @phba: Pointer to HBA context object.
@@ -2940,7 +3791,7 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
        if (!pmb)
                return -ENOMEM;
 
-       pmbox = &pmb->mb;
+       pmbox = &pmb->u.mb;
 
        /* Initialize the struct lpfc_sli_hbq structure for each hbq */
        phba->link_state = LPFC_INIT_MBX_CMDS;
@@ -2983,6 +3834,26 @@ lpfc_sli_hbq_setup(struct lpfc_hba *phba)
        return 0;
 }
 
+/**
+ * lpfc_sli4_rb_setup - Initialize and post RBs to HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called during the SLI initialization to configure
+ * all the HBQs and post buffers to the HBQ. The caller is not
+ * required to hold any locks. This function returns zero if successful,
+ * else it returns a negative error code.
+ **/
+static int
+lpfc_sli4_rb_setup(struct lpfc_hba *phba)
+{
+       phba->hbq_in_use = 1;
+       phba->hbqs[0].entry_count = lpfc_hbq_defs[0]->entry_count;
+       phba->hbq_count = 1;
+       /* Initially populate or replenish the HBQs */
+       lpfc_sli_hbqbuf_init_hbqs(phba, 0);
+       return 0;
+}
+
 /**
  * lpfc_sli_config_port - Issue config port mailbox command
  * @phba: Pointer to HBA context object.
@@ -3047,33 +3918,43 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
                        lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
                                "0442 Adapter failed to init, mbxCmd x%x "
                                "CONFIG_PORT, mbxStatus x%x Data: x%x\n",
-                               pmb->mb.mbxCommand, pmb->mb.mbxStatus, 0);
+                               pmb->u.mb.mbxCommand, pmb->u.mb.mbxStatus, 0);
                        spin_lock_irq(&phba->hbalock);
-                       phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
+                       phba->sli.sli_flag &= ~LPFC_SLI_ACTIVE;
                        spin_unlock_irq(&phba->hbalock);
                        rc = -ENXIO;
-               } else
+               } else {
+                       /* Allow asynchronous mailbox command to go through */
+                       spin_lock_irq(&phba->hbalock);
+                       phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+                       spin_unlock_irq(&phba->hbalock);
                        done = 1;
+               }
        }
        if (!done) {
                rc = -EINVAL;
                goto do_prep_failed;
        }
-       if (pmb->mb.un.varCfgPort.sli_mode == 3) {
-               if (!pmb->mb.un.varCfgPort.cMA) {
+       if (pmb->u.mb.un.varCfgPort.sli_mode == 3) {
+               if (!pmb->u.mb.un.varCfgPort.cMA) {
                        rc = -ENXIO;
                        goto do_prep_failed;
                }
-               if (phba->max_vpi && pmb->mb.un.varCfgPort.gmv) {
+               if (phba->max_vpi && pmb->u.mb.un.varCfgPort.gmv) {
                        phba->sli3_options |= LPFC_SLI3_NPIV_ENABLED;
-                       phba->max_vpi = pmb->mb.un.varCfgPort.max_vpi;
+                       phba->max_vpi = pmb->u.mb.un.varCfgPort.max_vpi;
+                       phba->max_vports = (phba->max_vpi > phba->max_vports) ?
+                               phba->max_vpi : phba->max_vports;
+
                } else
                        phba->max_vpi = 0;
-               if (pmb->mb.un.varCfgPort.gerbm)
+               if (pmb->u.mb.un.varCfgPort.gdss)
+                       phba->sli3_options |= LPFC_SLI3_DSS_ENABLED;
+               if (pmb->u.mb.un.varCfgPort.gerbm)
                        phba->sli3_options |= LPFC_SLI3_HBQ_ENABLED;
-               if (pmb->mb.un.varCfgPort.gcrp)
+               if (pmb->u.mb.un.varCfgPort.gcrp)
                        phba->sli3_options |= LPFC_SLI3_CRP_ENABLED;
-               if (pmb->mb.un.varCfgPort.ginb) {
+               if (pmb->u.mb.un.varCfgPort.ginb) {
                        phba->sli3_options |= LPFC_SLI3_INB_ENABLED;
                        phba->hbq_get = phba->mbox->us.s3_inb_pgp.hbq_get;
                        phba->port_gp = phba->mbox->us.s3_inb_pgp.port;
@@ -3089,7 +3970,7 @@ lpfc_sli_config_port(struct lpfc_hba *phba, int sli_mode)
                }
 
                if (phba->cfg_enable_bg) {
-                       if (pmb->mb.un.varCfgPort.gbg)
+                       if (pmb->u.mb.un.varCfgPort.gbg)
                                phba->sli3_options |= LPFC_SLI3_BG_ENABLED;
                        else
                                lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -3184,8 +4065,9 @@ lpfc_sli_hba_setup(struct lpfc_hba *phba)
                if (rc)
                        goto lpfc_sli_hba_setup_error;
        }
-
+       spin_lock_irq(&phba->hbalock);
        phba->sli.sli_flag |= LPFC_PROCESS_LA;
+       spin_unlock_irq(&phba->hbalock);
 
        rc = lpfc_config_port_post(phba);
        if (rc)
@@ -3200,6 +4082,488 @@ lpfc_sli_hba_setup_error:
        return rc;
 }
 
+/**
+ * lpfc_sli4_read_fcoe_params - Read fcoe params from conf region
+ * @phba: Pointer to HBA context object.
+ * @mboxq: mailbox pointer.
+ *
+ * This function issues a dump mailbox command to read config region
+ * 23, parses the records in the region, and populates the driver
+ * data structure.
+ **/
+static int
+lpfc_sli4_read_fcoe_params(struct lpfc_hba *phba,
+               LPFC_MBOXQ_t *mboxq)
+{
+       struct lpfc_dmabuf *mp;
+       struct lpfc_mqe *mqe;
+       uint32_t data_length;
+       int rc;
+
+       /* Program the default value of vlan_id and fc_map */
+       phba->valid_vlan = 0;
+       phba->fc_map[0] = LPFC_FCOE_FCF_MAP0;
+       phba->fc_map[1] = LPFC_FCOE_FCF_MAP1;
+       phba->fc_map[2] = LPFC_FCOE_FCF_MAP2;
+
+       mqe = &mboxq->u.mqe;
+       if (lpfc_dump_fcoe_param(phba, mboxq))
+               return -ENOMEM;
+
+       mp = (struct lpfc_dmabuf *) mboxq->context1;
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "(%d):2571 Mailbox cmd x%x Status x%x "
+                       "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                       "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                       "CQ: x%x x%x x%x x%x\n",
+                       mboxq->vport ? mboxq->vport->vpi : 0,
+                       bf_get(lpfc_mqe_command, mqe),
+                       bf_get(lpfc_mqe_status, mqe),
+                       mqe->un.mb_words[0], mqe->un.mb_words[1],
+                       mqe->un.mb_words[2], mqe->un.mb_words[3],
+                       mqe->un.mb_words[4], mqe->un.mb_words[5],
+                       mqe->un.mb_words[6], mqe->un.mb_words[7],
+                       mqe->un.mb_words[8], mqe->un.mb_words[9],
+                       mqe->un.mb_words[10], mqe->un.mb_words[11],
+                       mqe->un.mb_words[12], mqe->un.mb_words[13],
+                       mqe->un.mb_words[14], mqe->un.mb_words[15],
+                       mqe->un.mb_words[16], mqe->un.mb_words[50],
+                       mboxq->mcqe.word0,
+                       mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
+                       mboxq->mcqe.trailer);
+
+       if (rc) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               return -EIO;
+       }
+       data_length = mqe->un.mb_words[5];
+       if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
+               lpfc_mbuf_free(phba, mp->virt, mp->phys);
+               kfree(mp);
+               return -EIO;
+       }
+
+       lpfc_parse_fcoe_conf(phba, mp->virt, data_length);
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       return 0;
+}
+
+/**
+ * lpfc_sli4_read_rev - Issue READ_REV and collect vpd data
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to the LPFC_MBOXQ_t structure.
+ * @vpd: pointer to the memory to hold resulting port vpd data.
+ * @vpd_size: On input, the number of bytes allocated to @vpd.
+ *           On output, the number of data bytes in @vpd.
+ *
+ * This routine executes a READ_REV SLI4 mailbox command.  In
+ * addition, this routine gets the port vpd data.
+ *
+ * Return codes
+ *     0 - successful
+ *     ENOMEM - could not allocate memory.
+ **/
+static int
+lpfc_sli4_read_rev(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+                   uint8_t *vpd, uint32_t *vpd_size)
+{
+       int rc = 0;
+       uint32_t dma_size;
+       struct lpfc_dmabuf *dmabuf;
+       struct lpfc_mqe *mqe;
+
+       dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+       if (!dmabuf)
+               return -ENOMEM;
+
+       /*
+        * Get a DMA buffer for the vpd data resulting from the READ_REV
+        * mailbox command.
+        */
+       dma_size = *vpd_size;
+       dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                         dma_size,
+                                         &dmabuf->phys,
+                                         GFP_KERNEL);
+       if (!dmabuf->virt) {
+               kfree(dmabuf);
+               return -ENOMEM;
+       }
+       memset(dmabuf->virt, 0, dma_size);
+
+       /*
+        * The SLI4 implementation of READ_REV conflicts at word1,
+        * bits 31:16 and SLI4 adds vpd functionality not present
+        * in SLI3.  This code corrects the conflicts.
+        */
+       lpfc_read_rev(phba, mboxq);
+       mqe = &mboxq->u.mqe;
+       mqe->un.read_rev.vpd_paddr_high = putPaddrHigh(dmabuf->phys);
+       mqe->un.read_rev.vpd_paddr_low = putPaddrLow(dmabuf->phys);
+       mqe->un.read_rev.word1 &= 0x0000FFFF;
+       bf_set(lpfc_mbx_rd_rev_vpd, &mqe->un.read_rev, 1);
+       bf_set(lpfc_mbx_rd_rev_avail_len, &mqe->un.read_rev, dma_size);
+
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (rc) {
+               dma_free_coherent(&phba->pcidev->dev, dma_size,
+                                 dmabuf->virt, dmabuf->phys);
+               kfree(dmabuf);
+               return -EIO;
+       }
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "(%d):0380 Mailbox cmd x%x Status x%x "
+                       "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                       "x%x x%x x%x x%x x%x x%x x%x x%x x%x "
+                       "CQ: x%x x%x x%x x%x\n",
+                       mboxq->vport ? mboxq->vport->vpi : 0,
+                       bf_get(lpfc_mqe_command, mqe),
+                       bf_get(lpfc_mqe_status, mqe),
+                       mqe->un.mb_words[0], mqe->un.mb_words[1],
+                       mqe->un.mb_words[2], mqe->un.mb_words[3],
+                       mqe->un.mb_words[4], mqe->un.mb_words[5],
+                       mqe->un.mb_words[6], mqe->un.mb_words[7],
+                       mqe->un.mb_words[8], mqe->un.mb_words[9],
+                       mqe->un.mb_words[10], mqe->un.mb_words[11],
+                       mqe->un.mb_words[12], mqe->un.mb_words[13],
+                       mqe->un.mb_words[14], mqe->un.mb_words[15],
+                       mqe->un.mb_words[16], mqe->un.mb_words[50],
+                       mboxq->mcqe.word0,
+                       mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
+                       mboxq->mcqe.trailer);
+
+       /*
+        * The available vpd length cannot be bigger than the
+        * DMA buffer passed to the port.  Catch the less than
+        * case and update the caller's size.
+        */
+       if (mqe->un.read_rev.avail_vpd_len < *vpd_size)
+               *vpd_size = mqe->un.read_rev.avail_vpd_len;
+
+       lpfc_sli_pcimem_bcopy(dmabuf->virt, vpd, *vpd_size);
+       dma_free_coherent(&phba->pcidev->dev, dma_size,
+                         dmabuf->virt, dmabuf->phys);
+       kfree(dmabuf);
+       return 0;
+}
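The @vpd_size in/out contract means callers size the buffer up front and trust the routine to shrink the count to what the port actually returned. A minimal usage sketch mirroring the setup path later in this patch:

	/* Allocate a page for VPD; on success vpd_size is updated to the
	 * number of bytes the port actually provided. */
	uint32_t vpd_size = PAGE_SIZE;
	uint8_t *vpd = kzalloc(vpd_size, GFP_KERNEL);
	if (!vpd)
		return -ENOMEM;
	rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);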
+
+/**
+ * lpfc_sli4_arm_cqeq_intr - Arm sli-4 device completion and event queues
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is called to explicitly arm the SLI4 device's completion and
+ * event queues.
+ **/
+static void
+lpfc_sli4_arm_cqeq_intr(struct lpfc_hba *phba)
+{
+       uint8_t fcp_eqidx;
+
+       lpfc_sli4_cq_release(phba->sli4_hba.mbx_cq, LPFC_QUEUE_REARM);
+       lpfc_sli4_cq_release(phba->sli4_hba.els_cq, LPFC_QUEUE_REARM);
+       lpfc_sli4_cq_release(phba->sli4_hba.rxq_cq, LPFC_QUEUE_REARM);
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+               lpfc_sli4_cq_release(phba->sli4_hba.fcp_cq[fcp_eqidx],
+                                    LPFC_QUEUE_REARM);
+       lpfc_sli4_eq_release(phba->sli4_hba.sp_eq, LPFC_QUEUE_REARM);
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++)
+               lpfc_sli4_eq_release(phba->sli4_hba.fp_eq[fcp_eqidx],
+                                    LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_hba_setup - SLI4 device initialization PCI function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is the main SLI4 device initialization PCI function. This
+ * function is called by the HBA initialization code, HBA reset code and
+ * HBA error attention handler code. Caller is not required to hold any
+ * locks.
+ **/
+int
+lpfc_sli4_hba_setup(struct lpfc_hba *phba)
+{
+       int rc;
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mqe *mqe;
+       uint8_t *vpd;
+       uint32_t vpd_size;
+       uint32_t ftr_rsp = 0;
+       struct Scsi_Host *shost = lpfc_shost_from_vport(phba->pport);
+       struct lpfc_vport *vport = phba->pport;
+       struct lpfc_dmabuf *mp;
+
+       /* Perform a PCI function reset to start from clean */
+       rc = lpfc_pci_function_reset(phba);
+       if (unlikely(rc))
+               return -ENODEV;
+
+       /* Check the HBA Host Status Register for readyness */
+       rc = lpfc_sli4_post_status_check(phba);
+       if (unlikely(rc))
+               return -ENODEV;
+       else {
+               spin_lock_irq(&phba->hbalock);
+               phba->sli.sli_flag |= LPFC_SLI_ACTIVE;
+               spin_unlock_irq(&phba->hbalock);
+       }
+
+       /*
+        * Allocate a single mailbox container for initializing the
+        * port.
+        */
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+
+       /*
+        * Continue initialization with default values even if driver failed
+        * to read FCoE param config regions
+        */
+       if (lpfc_sli4_read_fcoe_params(phba, mboxq))
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
+                       "2570 Failed to read FCoE parameters\n");
+
+       /* Issue READ_REV to collect vpd and FW information. */
+       vpd_size = PAGE_SIZE;
+       vpd = kzalloc(vpd_size, GFP_KERNEL);
+       if (!vpd) {
+               rc = -ENOMEM;
+               goto out_free_mbox;
+       }
+
+       rc = lpfc_sli4_read_rev(phba, mboxq, vpd, &vpd_size);
+       if (unlikely(rc))
+               goto out_free_vpd;
+
+       mqe = &mboxq->u.mqe;
+       if ((bf_get(lpfc_mbx_rd_rev_sli_lvl,
+                   &mqe->un.read_rev) != LPFC_SLI_REV4) ||
+           (bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev) == 0)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                       "0376 READ_REV Error. SLI Level %d "
+                       "FCoE enabled %d\n",
+                       bf_get(lpfc_mbx_rd_rev_sli_lvl, &mqe->un.read_rev),
+                       bf_get(lpfc_mbx_rd_rev_fcoe, &mqe->un.read_rev));
+               rc = -EIO;
+               goto out_free_vpd;
+       }
+       /* Single threaded at this point, no need for lock */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag |= HBA_FCOE_SUPPORT;
+       spin_unlock_irq(&phba->hbalock);
+       /*
+        * Evaluate the read rev and vpd data. Populate the driver
+        * state with the results. If this routine fails, the failure
+        * is not fatal as the driver will use generic values.
+        */
+       rc = lpfc_parse_vpd(phba, vpd, vpd_size);
+       if (unlikely(!rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0377 Error %d parsing vpd. "
+                               "Using defaults.\n", rc);
+               rc = 0;
+       }
+
+       /* By now, we should have determined the SLI revision, hard code for now */
+       phba->sli_rev = LPFC_SLI_REV4;
+
+       /*
+        * Discover the port's supported feature set and match it against the
+        * hosts requests.
+        */
+       lpfc_request_features(phba, mboxq);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       if (unlikely(rc)) {
+               rc = -EIO;
+               goto out_free_vpd;
+       }
+
+       /*
+        * The port must support FCP initiator mode as this is the
+        * only mode running in the host.
+        */
+       if (!(bf_get(lpfc_mbx_rq_ftr_rsp_fcpi, &mqe->un.req_ftrs))) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                               "0378 No support for fcpi mode.\n");
+               ftr_rsp++;
+       }
+
+       /*
+        * If the port cannot support the host's requested features
+        * then turn off the global config parameters to disable the
+        * feature in the driver.  This is not a fatal error.
+        */
+       if ((phba->cfg_enable_bg) &&
+           !(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+               ftr_rsp++;
+
+       if (phba->max_vpi && phba->cfg_enable_npiv &&
+           !(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+               ftr_rsp++;
+
+       if (ftr_rsp) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                               "0379 Feature Mismatch Data: x%08x %08x "
+                               "x%x x%x x%x\n", mqe->un.req_ftrs.word2,
+                               mqe->un.req_ftrs.word3, phba->cfg_enable_bg,
+                               phba->cfg_enable_npiv, phba->max_vpi);
+               if (!(bf_get(lpfc_mbx_rq_ftr_rsp_dif, &mqe->un.req_ftrs)))
+                       phba->cfg_enable_bg = 0;
+               if (!(bf_get(lpfc_mbx_rq_ftr_rsp_npiv, &mqe->un.req_ftrs)))
+                       phba->cfg_enable_npiv = 0;
+       }
+
+       /* These SLI3 features are assumed in SLI4 */
+       spin_lock_irq(&phba->hbalock);
+       phba->sli3_options |= (LPFC_SLI3_NPIV_ENABLED | LPFC_SLI3_HBQ_ENABLED);
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Read the port's service parameters. */
+       lpfc_read_sparam(phba, mboxq, vport->vpi);
+       mboxq->vport = vport;
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       mp = (struct lpfc_dmabuf *) mboxq->context1;
+       if (rc == MBX_SUCCESS) {
+               memcpy(&vport->fc_sparam, mp->virt, sizeof(struct serv_parm));
+               rc = 0;
+       }
+
+       /*
+        * This memory was allocated by the lpfc_read_sparam routine. Release
+        * it to the mbuf pool.
+        */
+       lpfc_mbuf_free(phba, mp->virt, mp->phys);
+       kfree(mp);
+       mboxq->context1 = NULL;
+       if (unlikely(rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0382 READ_SPARAM command failed "
+                               "status %d, mbxStatus x%x\n",
+                               rc, bf_get(lpfc_mqe_status, mqe));
+               phba->link_state = LPFC_HBA_ERROR;
+               rc = -EIO;
+               goto out_free_vpd;
+       }
+
+       if (phba->cfg_soft_wwnn)
+               u64_to_wwn(phba->cfg_soft_wwnn,
+                          vport->fc_sparam.nodeName.u.wwn);
+       if (phba->cfg_soft_wwpn)
+               u64_to_wwn(phba->cfg_soft_wwpn,
+                          vport->fc_sparam.portName.u.wwn);
+       memcpy(&vport->fc_nodename, &vport->fc_sparam.nodeName,
+              sizeof(struct lpfc_name));
+       memcpy(&vport->fc_portname, &vport->fc_sparam.portName,
+              sizeof(struct lpfc_name));
+
+       /* Update the fc_host data structures with new wwn. */
+       fc_host_node_name(shost) = wwn_to_u64(vport->fc_nodename.u.wwn);
+       fc_host_port_name(shost) = wwn_to_u64(vport->fc_portname.u.wwn);
+
+       /* Register SGL pool to the device using non-embedded mailbox command */
+       rc = lpfc_sli4_post_sgl_list(phba);
+       if (unlikely(rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0582 Error %d during sgl post operation\n", rc);
+               rc = -ENODEV;
+               goto out_free_vpd;
+       }
+
+       /* Register SCSI SGL pool to the device */
+       rc = lpfc_sli4_repost_scsi_sgl_list(phba);
+       if (unlikely(rc)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX | LOG_SLI,
+                               "0383 Error %d during scsi sgl post operation\n",
+                               rc);
+               /* Some Scsi buffers were moved to the abort scsi list */
+               /* A pci function reset will repost them */
+               rc = -ENODEV;
+               goto out_free_vpd;
+       }
+
+       /* Post the rpi header region to the device. */
+       rc = lpfc_sli4_post_all_rpi_hdrs(phba);
+       if (unlikely(rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0393 Error %d during rpi post operation\n",
+                               rc);
+               rc = -ENODEV;
+               goto out_free_vpd;
+       }
+       /* Temporary initialization of lpfc_fip_flag to non-fip */
+       bf_set(lpfc_fip_flag, &phba->sli4_hba.sli4_flags, 0);
+
+       /* Set up all the queues to the device */
+       rc = lpfc_sli4_queue_setup(phba);
+       if (unlikely(rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0381 Error %d during queue setup.\n", rc);
+               goto out_stop_timers;
+       }
+
+       /* Arm the CQs and then EQs on device */
+       lpfc_sli4_arm_cqeq_intr(phba);
+
+       /* Indicate device interrupt mode */
+       phba->sli4_hba.intr_enable = 1;
+
+       /* Allow asynchronous mailbox command to go through */
+       spin_lock_irq(&phba->hbalock);
+       phba->sli.sli_flag &= ~LPFC_SLI_ASYNC_MBX_BLK;
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Post receive buffers to the device */
+       lpfc_sli4_rb_setup(phba);
+
+       /* Start the ELS watchdog timer */
+       /*
+        * The driver for SLI4 is not yet ready to process timeouts
+        * or interrupts.  Once it is, the comment bars can be removed.
+        */
+       /* mod_timer(&vport->els_tmofunc,
+        *           jiffies + HZ * (phba->fc_ratov*2)); */
+
+       /* Start heart beat timer */
+       mod_timer(&phba->hb_tmofunc,
+                 jiffies + HZ * LPFC_HB_MBOX_INTERVAL);
+       phba->hb_outstanding = 0;
+       phba->last_completion_time = jiffies;
+
+       /* Start error attention (ERATT) polling timer */
+       mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
+
+       /*
+        * The port is ready, set the host's link state to LINK_DOWN
+        * in preparation for link interrupts.
+        */
+       lpfc_init_link(phba, mboxq, phba->cfg_topology, phba->cfg_link_speed);
+       mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       lpfc_set_loopback_flag(phba);
+       /* Change driver state to LPFC_LINK_DOWN right before init link */
+       spin_lock_irq(&phba->hbalock);
+       phba->link_state = LPFC_LINK_DOWN;
+       spin_unlock_irq(&phba->hbalock);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (unlikely(rc != MBX_NOT_FINISHED)) {
+               kfree(vpd);
+               return 0;
+       } else
+               rc = -EIO;
+
+       /* Unset all the queues set up in this routine when error out */
+       if (rc)
+               lpfc_sli4_queue_unset(phba);
+
+out_stop_timers:
+       if (rc)
+               lpfc_stop_hba_timers(phba);
+out_free_vpd:
+       kfree(vpd);
+out_free_mbox:
+       mempool_free(mboxq, phba->mbox_mem_pool);
+       return rc;
+}
 
 /**
  * lpfc_mbox_timeout - Timeout call back function for mbox timer
@@ -3244,7 +4608,7 @@ void
 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 {
        LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
-       MAILBOX_t *mb = &pmbox->mb;
+       MAILBOX_t *mb = &pmbox->u.mb;
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
 
@@ -3281,7 +4645,7 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
        spin_unlock_irq(&phba->pport->work_port_lock);
        spin_lock_irq(&phba->hbalock);
        phba->link_state = LPFC_LINK_UNKNOWN;
-       psli->sli_flag &= ~LPFC_SLI2_ACTIVE;
+       psli->sli_flag &= ~LPFC_SLI_ACTIVE;
        spin_unlock_irq(&phba->hbalock);
 
        pring = &psli->ring[psli->fcp_ring];
@@ -3289,32 +4653,20 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 
        lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
                        "0345 Resetting board due to mailbox timeout\n");
-       /*
-        * lpfc_offline calls lpfc_sli_hba_down which will clean up
-        * on oustanding mailbox commands.
-        */
-       /* If resets are disabled then set error state and return. */
-       if (!phba->cfg_enable_hba_reset) {
-               phba->link_state = LPFC_HBA_ERROR;
-               return;
-       }
-       lpfc_offline_prep(phba);
-       lpfc_offline(phba);
-       lpfc_sli_brdrestart(phba);
-       lpfc_online(phba);
-       lpfc_unblock_mgmt_io(phba);
-       return;
+
+       /* Reset the HBA device */
+       lpfc_reset_hba(phba);
 }
 
 /**
- * lpfc_sli_issue_mbox - Issue a mailbox command to firmware
+ * lpfc_sli_issue_mbox_s3 - Issue an SLI3 mailbox command to firmware
  * @phba: Pointer to HBA context object.
  * @pmbox: Pointer to mailbox object.
  * @flag: Flag indicating how the mailbox need to be processed.
  *
  * This function is called by discovery code and HBA management code
- * to submit a mailbox command to firmware. This function gets the
- * hbalock to protect the data structures.
+ * to submit a mailbox command to firmware with SLI-3 interface spec. This
+ * function gets the hbalock to protect the data structures.
  * The mailbox command can be submitted in polling mode, in which case
  * this function will wait in a polling loop for the completion of the
  * mailbox.
@@ -3332,8 +4684,9 @@ lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
  * return codes the caller owns the mailbox command after the return of
  * the function.
  **/
-int
-lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+static int
+lpfc_sli_issue_mbox_s3(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox,
+                      uint32_t flag)
 {
        MAILBOX_t *mb;
        struct lpfc_sli *psli = &phba->sli;
@@ -3349,8 +4702,12 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
        spin_lock_irqsave(&phba->hbalock, drvr_flag);
        if (!pmbox) {
                /* processing mbox queue from intr_handler */
-               processing_queue = 1;
-               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+               if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+                       spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
+                       return MBX_SUCCESS;
+               }
+               processing_queue = 1;
+               phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                pmbox = lpfc_mbox_get(phba);
                if (!pmbox) {
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -3365,7 +4722,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                        lpfc_printf_log(phba, KERN_ERR,
                                        LOG_MBOX | LOG_VPORT,
                                        "1806 Mbox x%x failed. No vport\n",
-                                       pmbox->mb.mbxCommand);
+                                       pmbox->u.mb.mbxCommand);
                        dump_stack();
                        goto out_not_finished;
                }
@@ -3385,21 +4742,29 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
        psli = &phba->sli;
 
-       mb = &pmbox->mb;
+       mb = &pmbox->u.mb;
        status = MBX_SUCCESS;
 
        if (phba->link_state == LPFC_HBA_ERROR) {
                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 
                /* Mbox command <mbxCommand> cannot issue */
-               LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):0311 Mailbox command x%x cannot "
+                               "issue Data: x%x x%x\n",
+                               pmbox->vport ? pmbox->vport->vpi : 0,
+                               pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
                goto out_not_finished;
        }
 
        if (mb->mbxCommand != MBX_KILL_BOARD && flag & MBX_NOWAIT &&
            !(readl(phba->HCregaddr) & HC_MBINT_ENA)) {
                spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
-               LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):2528 Mailbox command x%x cannot "
+                               "issue Data: x%x x%x\n",
+                               pmbox->vport ? pmbox->vport->vpi : 0,
+                               pmbox->u.mb.mbxCommand, psli->sli_flag, flag);
                goto out_not_finished;
        }
 
@@ -3413,14 +4778,24 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 
                        /* Mbox command <mbxCommand> cannot issue */
-                       LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                                       "(%d):2529 Mailbox command x%x "
+                                       "cannot issue Data: x%x x%x\n",
+                                       pmbox->vport ? pmbox->vport->vpi : 0,
+                                       pmbox->u.mb.mbxCommand,
+                                       psli->sli_flag, flag);
                        goto out_not_finished;
                }
 
-               if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
+               if (!(psli->sli_flag & LPFC_SLI_ACTIVE)) {
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
                        /* Mbox command <mbxCommand> cannot issue */
-                       LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                                       "(%d):2530 Mailbox command x%x "
+                                       "cannot issue Data: x%x x%x\n",
+                                       pmbox->vport ? pmbox->vport->vpi : 0,
+                                       pmbox->u.mb.mbxCommand,
+                                       psli->sli_flag, flag);
                        goto out_not_finished;
                }
 
@@ -3462,12 +4837,17 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
        /* If we are not polling, we MUST be in SLI2 mode */
        if (flag != MBX_POLL) {
-               if (!(psli->sli_flag & LPFC_SLI2_ACTIVE) &&
+               if (!(psli->sli_flag & LPFC_SLI_ACTIVE) &&
                    (mb->mbxCommand != MBX_KILL_BOARD)) {
                        psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
                        spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
                        /* Mbox command <mbxCommand> cannot issue */
-                       LOG_MBOX_CANNOT_ISSUE_DATA(phba, pmbox, psli, flag);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                                       "(%d):2531 Mailbox command x%x "
+                                       "cannot issue Data: x%x x%x\n",
+                                       pmbox->vport ? pmbox->vport->vpi : 0,
+                                       pmbox->u.mb.mbxCommand,
+                                       psli->sli_flag, flag);
                        goto out_not_finished;
                }
                /* timeout active mbox command */
@@ -3506,7 +4886,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
        /* next set own bit for the adapter and copy over command word */
        mb->mbxOwner = OWN_CHIP;
 
-       if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+       if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                /* First copy command data to host SLIM area */
                lpfc_sli_pcimem_bcopy(mb, phba->mbox, MAILBOX_CMD_SIZE);
        } else {
@@ -3529,7 +4909,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
                if (mb->mbxCommand == MBX_CONFIG_PORT) {
                        /* switch over to host mailbox */
-                       psli->sli_flag |= LPFC_SLI2_ACTIVE;
+                       psli->sli_flag |= LPFC_SLI_ACTIVE;
                }
        }
 
@@ -3552,7 +4932,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                writel(CA_MBATT, phba->CAregaddr);
                readl(phba->CAregaddr); /* flush */
 
-               if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+               if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                        /* First read mbox status word */
                        word0 = *((uint32_t *)phba->mbox);
                        word0 = le32_to_cpu(word0);
@@ -3591,7 +4971,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                                spin_lock_irqsave(&phba->hbalock, drvr_flag);
                        }
 
-                       if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+                       if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                                /* First copy command data */
                                word0 = *((uint32_t *)phba->mbox);
                                word0 = le32_to_cpu(word0);
@@ -3604,7 +4984,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                                        if (((slimword0 & OWN_CHIP) != OWN_CHIP)
                                            && slimmb->mbxStatus) {
                                                psli->sli_flag &=
-                                                   ~LPFC_SLI2_ACTIVE;
+                                                   ~LPFC_SLI_ACTIVE;
                                                word0 = slimword0;
                                        }
                                }
@@ -3616,7 +4996,7 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
                        ha_copy = readl(phba->HAregaddr);
                }
 
-               if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
+               if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                        /* copy results back to user */
                        lpfc_sli_pcimem_bcopy(phba->mbox, mb, MAILBOX_CMD_SIZE);
                } else {
@@ -3643,12 +5023,419 @@ lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
 
 out_not_finished:
        if (processing_queue) {
-               pmbox->mb.mbxStatus = MBX_NOT_FINISHED;
+               pmbox->u.mb.mbxStatus = MBX_NOT_FINISHED;
                lpfc_mbox_cmpl_put(phba, pmbox);
        }
        return MBX_NOT_FINISHED;
 }
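The _s3 issue path supports both completion models used throughout this patch: synchronous polling, and asynchronous completion through the mailbox's mbox_cmpl handler. A hedged sketch of the two call shapes, built only from calls that appear in this patch:

	/* Polling: the caller owns the mailbox again when this returns. */
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);

	/* Non-blocking: set a completion handler first; the mailbox comes
	 * back through mbox_cmpl unless MBX_NOT_FINISHED is returned. */
	mboxq->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
	if (rc == MBX_NOT_FINISHED)
		mempool_free(mboxq, phba->mbox_mem_pool);	/* caller cleans up */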
 
+/**
+ * lpfc_sli4_post_sync_mbox - Post an SLI4 mailbox to the bootstrap mailbox
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * The function posts a mailbox to the port.  The mailbox is expected
+ * to be completely filled in and ready for the port to operate on it.
+ * This routine executes a synchronous completion operation on the
+ * mailbox by polling for its completion.
+ *
+ * The caller must not be holding any locks when calling this routine.
+ *
+ * Returns:
+ *     MBX_SUCCESS - mailbox posted successfully
+ *     Any of the MBX error values.
+ **/
+static int
+lpfc_sli4_post_sync_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       int rc = MBX_SUCCESS;
+       unsigned long iflag;
+       uint32_t db_ready;
+       uint32_t mcqe_status;
+       uint32_t mbx_cmnd;
+       unsigned long timeout;
+       struct lpfc_sli *psli = &phba->sli;
+       struct lpfc_mqe *mb = &mboxq->u.mqe;
+       struct lpfc_bmbx_create *mbox_rgn;
+       struct dma_address *dma_address;
+       struct lpfc_register bmbx_reg;
+
+       /*
+        * Only one mailbox can be active to the bootstrap mailbox region
+        * at a time and there is no queueing provided.
+        */
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):2532 Mailbox command x%x (x%x) "
+                               "cannot issue Data: x%x x%x\n",
+                               mboxq->vport ? mboxq->vport->vpi : 0,
+                               mboxq->u.mb.mbxCommand,
+                               lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                               psli->sli_flag, MBX_POLL);
+               return MBXERR_ERROR;
+       }
+       /* The server grabs the token and owns it until release */
+       psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+       phba->sli.mbox_active = mboxq;
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+       /*
+        * Initialize the bootstrap memory region to avoid stale data areas
+        * in the mailbox post.  Then copy the caller's mailbox contents to
+        * the bmbx mailbox region.
+        */
+       mbx_cmnd = bf_get(lpfc_mqe_command, mb);
+       memset(phba->sli4_hba.bmbx.avirt, 0, sizeof(struct lpfc_bmbx_create));
+       lpfc_sli_pcimem_bcopy(mb, phba->sli4_hba.bmbx.avirt,
+                             sizeof(struct lpfc_mqe));
+
+       /* Post the high mailbox dma address to the port and wait for ready. */
+       dma_address = &phba->sli4_hba.bmbx.dma_address;
+       writel(dma_address->addr_hi, phba->sli4_hba.BMBXregaddr);
+
+       timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+                                  * 1000) + jiffies;
+       do {
+               bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+               db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+               if (!db_ready)
+                       msleep(2);
+
+               if (time_after(jiffies, timeout)) {
+                       rc = MBXERR_ERROR;
+                       goto exit;
+               }
+       } while (!db_ready);
+
+       /* Post the low mailbox dma address to the port. */
+       writel(dma_address->addr_lo, phba->sli4_hba.BMBXregaddr);
+       timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, mbx_cmnd)
+                                  * 1000) + jiffies;
+       do {
+               bmbx_reg.word0 = readl(phba->sli4_hba.BMBXregaddr);
+               db_ready = bf_get(lpfc_bmbx_rdy, &bmbx_reg);
+               if (!db_ready)
+                       msleep(2);
+
+               if (time_after(jiffies, timeout)) {
+                       rc = MBXERR_ERROR;
+                       goto exit;
+               }
+       } while (!db_ready);
+
+       /*
+        * Read the CQ to ensure the mailbox has completed.
+        * If so, update the mailbox status so that the upper layers
+        * can complete the request normally.
+        */
+       lpfc_sli_pcimem_bcopy(phba->sli4_hba.bmbx.avirt, mb,
+                             sizeof(struct lpfc_mqe));
+       mbox_rgn = (struct lpfc_bmbx_create *) phba->sli4_hba.bmbx.avirt;
+       lpfc_sli_pcimem_bcopy(&mbox_rgn->mcqe, &mboxq->mcqe,
+                             sizeof(struct lpfc_mcqe));
+       mcqe_status = bf_get(lpfc_mcqe_status, &mbox_rgn->mcqe);
+
+       /* Prefix the mailbox status with range x4000 to note SLI4 status. */
+       if (mcqe_status != MB_CQE_STATUS_SUCCESS) {
+               bf_set(lpfc_mqe_status, mb, LPFC_MBX_ERROR_RANGE | mcqe_status);
+               rc = MBXERR_ERROR;
+       }
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "(%d):0356 Mailbox cmd x%x (x%x) Status x%x "
+                       "Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x x%x"
+                       " x%x x%x CQ: x%x x%x x%x x%x\n",
+                       mboxq->vport ? mboxq->vport->vpi : 0,
+                       mbx_cmnd, lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                       bf_get(lpfc_mqe_status, mb),
+                       mb->un.mb_words[0], mb->un.mb_words[1],
+                       mb->un.mb_words[2], mb->un.mb_words[3],
+                       mb->un.mb_words[4], mb->un.mb_words[5],
+                       mb->un.mb_words[6], mb->un.mb_words[7],
+                       mb->un.mb_words[8], mb->un.mb_words[9],
+                       mb->un.mb_words[10], mb->un.mb_words[11],
+                       mb->un.mb_words[12], mboxq->mcqe.word0,
+                       mboxq->mcqe.mcqe_tag0,  mboxq->mcqe.mcqe_tag1,
+                       mboxq->mcqe.trailer);
+exit:
+       /* We hold the token; release it and the active command under the lock */
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       phba->sli.mbox_active = NULL;
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+       return rc;
+}
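
The two doorbell-ready loops above share one idiom: derive an absolute
deadline from the per-command timeout, poll the ready bit, sleep 2 ms
between reads, and fail once the deadline passes. A minimal standalone
sketch of that idiom in userspace C, with invented names (device_ready()
stands in for reading the bmbx ready bit; this is not driver code):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static bool device_ready(int *reads)        /* stand-in for the rdy bit */
{
        return ++(*reads) >= 5;             /* pretend ready on 5th read */
}

static int poll_until_ready(long timeout_ms)
{
        struct timespec now, deadline;
        int reads = 0;

        /* absolute deadline, like jiffies + msecs_to_jiffies(...) */
        clock_gettime(CLOCK_MONOTONIC, &deadline);
        deadline.tv_sec  += timeout_ms / 1000;
        deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
        if (deadline.tv_nsec >= 1000000000L) {
                deadline.tv_sec++;
                deadline.tv_nsec -= 1000000000L;
        }

        while (!device_ready(&reads)) {
                usleep(2000);               /* matches the 2 ms msleep */
                clock_gettime(CLOCK_MONOTONIC, &now);
                if (now.tv_sec > deadline.tv_sec ||
                    (now.tv_sec == deadline.tv_sec &&
                     now.tv_nsec > deadline.tv_nsec))
                        return -1;          /* analogous to MBXERR_ERROR */
        }
        return 0;
}

int main(void)
{
        printf("poll result: %d\n", poll_until_ready(100));
        return 0;
}
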
+
+/**
+ * lpfc_sli_issue_mbox_s4 - Issue an SLI4 mailbox command to firmware
+ * @phba: Pointer to HBA context object.
+ * @mboxq: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This function is called by discovery code and HBA management code to submit
+ * a mailbox command to firmware with SLI-4 interface spec.
+ *
+ * The caller owns the mailbox command after the function returns.
+ **/
+static int
+lpfc_sli_issue_mbox_s4(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+                      uint32_t flag)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       unsigned long iflags;
+       int rc;
+
+       /* Detect polling mode and jump to a handler */
+       if (!phba->sli4_hba.intr_enable) {
+               if (flag == MBX_POLL)
+                       rc = lpfc_sli4_post_sync_mbox(phba, mboxq);
+               else
+                       rc = -EIO;
+               if (rc != MBX_SUCCESS)
+                       lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                                       "(%d):2541 Mailbox command x%x "
+                                       "(x%x) cannot issue Data: x%x x%x\n",
+                                       mboxq->vport ? mboxq->vport->vpi : 0,
+                                       mboxq->u.mb.mbxCommand,
+                                       lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                                       psli->sli_flag, flag);
+               return rc;
+       } else if (flag == MBX_POLL) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):2542 Mailbox command x%x (x%x) "
+                               "cannot issue Data: x%x x%x\n",
+                               mboxq->vport ? mboxq->vport->vpi : 0,
+                               mboxq->u.mb.mbxCommand,
+                               lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                               psli->sli_flag, flag);
+               return -EIO;
+       }
+
+       /* Now, interrupt mode asynchronous mailbox command */
+       rc = lpfc_mbox_cmd_check(phba, mboxq);
+       if (rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):2543 Mailbox command x%x (x%x) "
+                               "cannot issue Data: x%x x%x\n",
+                               mboxq->vport ? mboxq->vport->vpi : 0,
+                               mboxq->u.mb.mbxCommand,
+                               lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                               psli->sli_flag, flag);
+               goto out_not_finished;
+       }
+       rc = lpfc_mbox_dev_check(phba);
+       if (unlikely(rc)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):2544 Mailbox command x%x (x%x) "
+                               "cannot issue Data: x%x x%x\n",
+                               mboxq->vport ? mboxq->vport->vpi : 0,
+                               mboxq->u.mb.mbxCommand,
+                               lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                               psli->sli_flag, flag);
+               goto out_not_finished;
+       }
+
+       /* Put the mailbox command to the driver internal FIFO */
+       psli->slistat.mbox_busy++;
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       lpfc_mbox_put(phba, mboxq);
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "(%d):0354 Mbox cmd issue - Enqueue Data: "
+                       "x%x (x%x) x%x x%x x%x\n",
+                       mboxq->vport ? mboxq->vport->vpi : 0xffffff,
+                       bf_get(lpfc_mqe_command, &mboxq->u.mqe),
+                       lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                       phba->pport->port_state,
+                       psli->sli_flag, MBX_NOWAIT);
+       /* Wake up worker thread to transport mailbox command from head */
+       lpfc_worker_wake_up(phba);
+
+       return MBX_BUSY;
+
+out_not_finished:
+       return MBX_NOT_FINISHED;
+}
+
+/**
+ * lpfc_sli4_post_async_mbox - Post an SLI4 mailbox command to device
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called by worker thread to send a mailbox command to
+ * SLI4 HBA firmware.
+ *
+ **/
+int
+lpfc_sli4_post_async_mbox(struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli = &phba->sli;
+       LPFC_MBOXQ_t *mboxq;
+       int rc = MBX_SUCCESS;
+       unsigned long iflags;
+       struct lpfc_mqe *mqe;
+       uint32_t mbx_cmnd;
+
+       /* Check interrupt mode before posting an async mailbox command */
+       if (unlikely(!phba->sli4_hba.intr_enable))
+               return MBX_NOT_FINISHED;
+
+       /* Check for mailbox command service token */
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               return MBX_NOT_FINISHED;
+       }
+       if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               return MBX_NOT_FINISHED;
+       }
+       if (unlikely(phba->sli.mbox_active)) {
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "0384 There is pending active mailbox cmd\n");
+               return MBX_NOT_FINISHED;
+       }
+       /* Take the mailbox command service token */
+       psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;
+
+       /* Get the next mailbox command from head of queue */
+       mboxq = lpfc_mbox_get(phba);
+
+       /* If no more mailbox commands are waiting for post, we're done */
+       if (!mboxq) {
+               psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               return MBX_SUCCESS;
+       }
+       phba->sli.mbox_active = mboxq;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       /* Check device readiness for posting mailbox command */
+       rc = lpfc_mbox_dev_check(phba);
+       if (unlikely(rc))
+               /* Driver clean routine will clean up pending mailbox */
+               goto out_not_finished;
+
+       /* Prepare the mbox command to be posted */
+       mqe = &mboxq->u.mqe;
+       mbx_cmnd = bf_get(lpfc_mqe_command, mqe);
+
+       /* Start timer for the mbox_tmo and log some mailbox post messages */
+       mod_timer(&psli->mbox_tmo, (jiffies +
+                 (HZ * lpfc_mbox_tmo_val(phba, mbx_cmnd))));
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_MBOX | LOG_SLI,
+                       "(%d):0355 Mailbox cmd x%x (x%x) issue Data: "
+                       "x%x x%x\n",
+                       mboxq->vport ? mboxq->vport->vpi : 0, mbx_cmnd,
+                       lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                       phba->pport->port_state, psli->sli_flag);
+
+       if (mbx_cmnd != MBX_HEARTBEAT) {
+               if (mboxq->vport) {
+                       lpfc_debugfs_disc_trc(mboxq->vport,
+                               LPFC_DISC_TRC_MBOX_VPORT,
+                               "MBOX Send vport: cmd:x%x mb:x%x x%x",
+                               mbx_cmnd, mqe->un.mb_words[0],
+                               mqe->un.mb_words[1]);
+               } else {
+                       lpfc_debugfs_disc_trc(phba->pport,
+                               LPFC_DISC_TRC_MBOX,
+                               "MBOX Send: cmd:x%x mb:x%x x%x",
+                               mbx_cmnd, mqe->un.mb_words[0],
+                               mqe->un.mb_words[1]);
+               }
+       }
+       psli->slistat.mbox_cmd++;
+
+       /* Post the mailbox command to the port */
+       rc = lpfc_sli4_mq_put(phba->sli4_hba.mbx_wq, mqe);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+                               "(%d):2533 Mailbox command x%x (x%x) "
+                               "cannot issue Data: x%x x%x\n",
+                               mboxq->vport ? mboxq->vport->vpi : 0,
+                               mboxq->u.mb.mbxCommand,
+                               lpfc_sli4_mbox_opcode_get(phba, mboxq),
+                               psli->sli_flag, MBX_NOWAIT);
+               goto out_not_finished;
+       }
+
+       return rc;
+
+out_not_finished:
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       mboxq->u.mb.mbxStatus = MBX_NOT_FINISHED;
+       __lpfc_mbox_cmpl_put(phba, mboxq);
+       /* Release the token */
+       psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       phba->sli.mbox_active = NULL;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       return MBX_NOT_FINISHED;
+}
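
Both the synchronous and asynchronous paths serialize on the
LPFC_SLI_MBOX_ACTIVE token: test-and-set it under hbalock, do the work
unlocked, and clear it under the lock on every exit path. A small
userspace model of that single-owner gating, with a pthread mutex
standing in for the spinlock and illustrative names:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;
static bool mbox_active;                /* models LPFC_SLI_MBOX_ACTIVE */

static int post_one(int id)
{
        pthread_mutex_lock(&hbalock);
        if (mbox_active) {              /* another command owns the token */
                pthread_mutex_unlock(&hbalock);
                return -1;              /* analogous to MBX_NOT_FINISHED */
        }
        mbox_active = true;             /* take the token */
        pthread_mutex_unlock(&hbalock);

        printf("posting mailbox %d\n", id);     /* work done unlocked */

        pthread_mutex_lock(&hbalock);
        mbox_active = false;            /* release the token on every exit */
        pthread_mutex_unlock(&hbalock);
        return 0;
}

int main(void)
{
        return post_one(1);
}
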
+
+/**
+ * lpfc_sli_issue_mbox - Wrapper func for issuing mailbox command
+ * @phba: Pointer to HBA context object.
+ * @pmbox: Pointer to mailbox object.
+ * @flag: Flag indicating how the mailbox needs to be processed.
+ *
+ * This routine dispatches to the actual SLI3 or SLI4 mailbox issuing routine
+ * through the API jump table function pointer in the lpfc_hba struct.
+ *
+ * The caller owns the mailbox command after the function returns.
+ **/
+int
+lpfc_sli_issue_mbox(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmbox, uint32_t flag)
+{
+       return phba->lpfc_sli_issue_mbox(phba, pmbox, flag);
+}
+
+/**
+ * lpfc_mbox_api_table_setup - Set up mbox api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the mbox interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_mbox_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+       switch (dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s3;
+               phba->lpfc_sli_handle_slow_ring_event =
+                               lpfc_sli_handle_slow_ring_event_s3;
+               phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s3;
+               phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s3;
+               phba->lpfc_sli_brdready = lpfc_sli_brdready_s3;
+               break;
+       case LPFC_PCI_DEV_OC:
+               phba->lpfc_sli_issue_mbox = lpfc_sli_issue_mbox_s4;
+               phba->lpfc_sli_handle_slow_ring_event =
+                               lpfc_sli_handle_slow_ring_event_s4;
+               phba->lpfc_sli_hbq_to_firmware = lpfc_sli_hbq_to_firmware_s4;
+               phba->lpfc_sli_brdrestart = lpfc_sli_brdrestart_s4;
+               phba->lpfc_sli_brdready = lpfc_sli_brdready_s4;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1420 Invalid HBA PCI-device group: 0x%x\n",
+                               dev_grp);
+               return -ENODEV;
+               break;
+       }
+       return 0;
+}
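
Setting the function pointers once at probe time lets every hot path
issue through one indirect call instead of branching on the SLI revision
at each call site (lpfc_sli_api_table_setup further below applies the
same pattern to the iocb path). A self-contained sketch of the idiom;
the types and names here are invented, not the driver's:

#include <stdio.h>

struct hba;
typedef int (*issue_fn)(struct hba *);

struct hba {
        issue_fn issue_mbox;            /* per-revision entry point */
};

static int issue_mbox_s3(struct hba *h) { (void)h; puts("SLI-3 path"); return 0; }
static int issue_mbox_s4(struct hba *h) { (void)h; puts("SLI-4 path"); return 0; }

enum dev_grp { DEV_LP, DEV_OC };        /* models LPFC_PCI_DEV_LP/_OC */

static int api_table_setup(struct hba *h, enum dev_grp grp)
{
        switch (grp) {
        case DEV_LP: h->issue_mbox = issue_mbox_s3; break;
        case DEV_OC: h->issue_mbox = issue_mbox_s4; break;
        default:     return -1;         /* -ENODEV in the driver */
        }
        return 0;
}

int main(void)
{
        struct hba h;

        if (api_table_setup(&h, DEV_OC) == 0)
                h.issue_mbox(&h);       /* dispatches to the s4 path */
        return 0;
}
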
+
 /**
  * __lpfc_sli_ringtx_put - Add an iocb to the txq
  * @phba: Pointer to HBA context object.
@@ -3701,35 +5488,34 @@ lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 }
 
 /**
- * __lpfc_sli_issue_iocb - Lockless version of lpfc_sli_issue_iocb
+ * __lpfc_sli_issue_iocb_s3 - SLI3 device lockless ver of lpfc_sli_issue_iocb
  * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
+ * @ring_number: SLI ring number to issue iocb on.
  * @piocb: Pointer to command iocb.
  * @flag: Flag indicating if this command can be put into txq.
  *
- * __lpfc_sli_issue_iocb is used by other functions in the driver
- * to issue an iocb command to the HBA. If the PCI slot is recovering
- * from error state or if HBA is resetting or if LPFC_STOP_IOCB_EVENT
- * flag is turned on, the function returns IOCB_ERROR.
- * When the link is down, this function allows only iocbs for
- * posting buffers.
- * This function finds next available slot in the command ring and
- * posts the command to the available slot and writes the port
- * attention register to request HBA start processing new iocb.
- * If there is no slot available in the ring and
- * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the
- * txq, otherwise the function returns IOCB_BUSY.
- *
- * This function is called with hbalock held.
- * The function will return success after it successfully submit the
- * iocb to firmware or after adding to the txq.
+ * __lpfc_sli_issue_iocb_s3 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-3 interface spec. If the PCI slot is
+ * recovering from error state, if HBA is resetting or if LPFC_STOP_IOCB_EVENT
+ * flag is turned on, the function returns IOCB_ERROR. When the link is down,
+ * this function allows only iocbs for posting buffers. This function finds
+ * next available slot in the command ring and posts the command to the
+ * available slot and writes the port attention register to request HBA start
+ * processing new iocb. If there is no slot available in the ring and
+ * flag & SLI_IOCB_RET_IOCB is set, the new iocb is added to the txq, otherwise
+ * the function returns IOCB_BUSY.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to the
+ * txq.
  **/
 static int
-__lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+__lpfc_sli_issue_iocb_s3(struct lpfc_hba *phba, uint32_t ring_number,
                    struct lpfc_iocbq *piocb, uint32_t flag)
 {
        struct lpfc_iocbq *nextiocb;
        IOCB_t *iocb;
+       struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
 
        if (piocb->iocb_cmpl && (!piocb->vport) &&
           (piocb->iocb.ulpCommand != CMD_ABORT_XRI_CN) &&
@@ -3833,74 +5619,566 @@ __lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        return IOCB_BUSY;
 }
 
+/**
+ * lpfc_sli4_bpl2sgl - Convert the bpl/bde to a sgl.
+ * @phba: Pointer to HBA context object.
+ * @piocbq: Pointer to command iocb.
+ * @sglq: Pointer to the scatter gather queue object.
+ *
+ * This routine converts the bpl or bde that is in the IOCB
+ * to a sgl list for the sli4 hardware. The physical address
+ * of the bpl/bde is converted back to a virtual address.
+ * If the IOCB contains a BPL then the list of BDE's is
+ * converted to sli4_sge's. If the IOCB contains a single
+ * BDE then it is converted to a single sli4_sge.
+ * The IOCB is still in cpu endianness so the contents of
+ * the bpl can be used without byte swapping.
+ *
+ * Returns valid XRI = Success, NO_XRI = Failure.
+**/
+static uint16_t
+lpfc_sli4_bpl2sgl(struct lpfc_hba *phba, struct lpfc_iocbq *piocbq,
+               struct lpfc_sglq *sglq)
+{
+       uint16_t xritag = NO_XRI;
+       struct ulp_bde64 *bpl = NULL;
+       struct ulp_bde64 bde;
+       struct sli4_sge *sgl  = NULL;
+       IOCB_t *icmd;
+       int numBdes = 0;
+       int i = 0;
+
+       if (!piocbq || !sglq)
+               return xritag;
+
+       sgl  = (struct sli4_sge *)sglq->sgl;
+       icmd = &piocbq->iocb;
+       if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+               numBdes = icmd->un.genreq64.bdl.bdeSize /
+                               sizeof(struct ulp_bde64);
+               /* The addrHigh and addrLow fields within the IOCB
+                * have not been byteswapped yet so there is no
+                * need to swap them back.
+                */
+               bpl  = (struct ulp_bde64 *)
+                       ((struct lpfc_dmabuf *)piocbq->context3)->virt;
+
+               if (!bpl)
+                       return xritag;
+
+               for (i = 0; i < numBdes; i++) {
+                       /* Should already be byte swapped. */
+                       sgl->addr_hi =  bpl->addrHigh;
+                       sgl->addr_lo =  bpl->addrLow;
+                       /* swap the size field back to the cpu so we
+                        * can assign it to the sgl.
+                        */
+                       bde.tus.w  = le32_to_cpu(bpl->tus.w);
+                       bf_set(lpfc_sli4_sge_len, sgl, bde.tus.f.bdeSize);
+                       if ((i+1) == numBdes)
+                               bf_set(lpfc_sli4_sge_last, sgl, 1);
+                       else
+                               bf_set(lpfc_sli4_sge_last, sgl, 0);
+                       sgl->word2 = cpu_to_le32(sgl->word2);
+                       sgl->word3 = cpu_to_le32(sgl->word3);
+                       bpl++;
+                       sgl++;
+               }
+       } else if (icmd->un.genreq64.bdl.bdeFlags == BUFF_TYPE_BDE_64) {
+                       /* The addrHigh and addrLow fields of the BDE have not
+                        * been byteswapped yet so they need to be swapped
+                        * before putting them in the sgl.
+                        */
+                       sgl->addr_hi =
+                               cpu_to_le32(icmd->un.genreq64.bdl.addrHigh);
+                       sgl->addr_lo =
+                               cpu_to_le32(icmd->un.genreq64.bdl.addrLow);
+                       bf_set(lpfc_sli4_sge_len, sgl,
+                               icmd->un.genreq64.bdl.bdeSize);
+                       bf_set(lpfc_sli4_sge_last, sgl, 1);
+                       sgl->word2 = cpu_to_le32(sgl->word2);
+                       sgl->word3 = cpu_to_le32(sgl->word3);
+       }
+       return sglq->sli4_xritag;
+}
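
The conversion above leaves DMA-visible words in little-endian form and
swaps only the size word into CPU order long enough to repack it into
the sge. A standalone sketch of that byte-order round trip; the field
layout and flag bit below are invented for illustration, not the
driver's:

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_to_cpu(uint32_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
        return __builtin_bswap32(v);
#else
        return v;
#endif
}
#define cpu_to_le32(v) le32_to_cpu(v)   /* a 32-bit byte swap is its own inverse */

struct bde { uint32_t size_le; };       /* source entry, little-endian on the wire */
struct sge { uint32_t word2_le; };      /* destination entry, little-endian on the wire */

#define SGE_LAST (1u << 31)             /* invented "last entry" flag bit */

static void bde_to_sge(const struct bde *b, struct sge *s, int last)
{
        uint32_t size = le32_to_cpu(b->size_le);   /* CPU order for arithmetic */
        uint32_t w = size & 0x00ffffffu;           /* invented 24-bit length field */

        if (last)
                w |= SGE_LAST;
        s->word2_le = cpu_to_le32(w);              /* back to wire order */
}

int main(void)
{
        struct bde b = { .size_le = cpu_to_le32(512) };
        struct sge s;

        bde_to_sge(&b, &s, 1);
        printf("sge word2 (cpu order): 0x%08x\n",
               (unsigned int)le32_to_cpu(s.word2_le));
        return 0;
}
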
 
 /**
- * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
  * @phba: Pointer to HBA context object.
- * @pring: Pointer to driver SLI ring object.
  * @piocb: Pointer to command iocb.
- * @flag: Flag indicating if this command can be put into txq.
  *
- * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
- * function. This function gets the hbalock and calls
- * __lpfc_sli_issue_iocb function and will return the error returned
- * by __lpfc_sli_issue_iocb function. This wrapper is used by
- * functions which do not hold hbalock.
+ * This routine performs round-robin distribution of SCSI commands across
+ * the SLI4 FCP work queue indexes.
+ *
+ * Return: index of the SLI4 fast-path FCP work queue.
  **/
-int
-lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
-                   struct lpfc_iocbq *piocb, uint32_t flag)
+static uint32_t
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
 {
-       unsigned long iflags;
-       int rc;
-
-       spin_lock_irqsave(&phba->hbalock, iflags);
-       rc = __lpfc_sli_issue_iocb(phba, pring, piocb, flag);
-       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       static uint32_t fcp_qidx;
 
-       return rc;
+       return fcp_qidx++ % phba->cfg_fcp_wq_count;
 }
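
A toy model of this round-robin spread of commands across the FCP work
queues. The driver uses a plain function-local static counter; this
sketch substitutes an atomic counter, an assumption made so the example
is thread-safe, not a statement about the driver:

#include <stdatomic.h>
#include <stdio.h>

#define FCP_WQ_COUNT 4u                 /* stands in for phba->cfg_fcp_wq_count */

static atomic_uint fcp_qidx;            /* models the function-local static */

static unsigned int scmd_to_wqidx(void)
{
        return atomic_fetch_add(&fcp_qidx, 1) % FCP_WQ_COUNT;
}

int main(void)
{
        for (int i = 0; i < 8; i++)
                printf("cmd %d -> wq %u\n", i, scmd_to_wqidx());
        return 0;
}
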
 
 /**
- * lpfc_extra_ring_setup - Extra ring setup function
+ * lpfc_sli4_iocb2wqe - Convert the IOCB to a work queue entry.
+ * @phba: Pointer to HBA context object.
+ * @iocbq: Pointer to command iocb.
+ * @wqe: Pointer to the work queue entry.
  *
- * This function is called while driver attaches with the
- * HBA to setup the extra ring. The extra ring is used
- * only when driver needs to support target mode functionality
- * or IP over FC functionalities.
+ * This routine converts the iocb command to its Work Queue Entry
+ * equivalent. The wqe pointer should not have any fields set when
+ * this routine is called because it will memcpy over them.
+ * This routine does not set the CQ_ID or the WQEC bits in the
+ * wqe.
  *
- * This function is called with no lock held.
+ * Returns: 0 = Success, IOCB_ERROR = Failure.
  **/
 static int
-lpfc_extra_ring_setup( struct lpfc_hba *phba)
+lpfc_sli4_iocb2wqe(struct lpfc_hba *phba, struct lpfc_iocbq *iocbq,
+               union lpfc_wqe *wqe)
 {
-       struct lpfc_sli *psli;
-       struct lpfc_sli_ring *pring;
+       uint32_t payload_len = 0;
+       uint8_t ct = 0;
+       uint32_t fip;
+       uint32_t abort_tag;
+       uint8_t command_type = ELS_COMMAND_NON_FIP;
+       uint8_t cmnd;
+       uint16_t xritag;
+       struct ulp_bde64 *bpl = NULL;
+
+       fip = bf_get(lpfc_fip_flag, &phba->sli4_hba.sli4_flags);
+       /* The fcp commands will set command type */
+       if ((!(iocbq->iocb_flag &  LPFC_IO_FCP)) && (!fip))
+               command_type = ELS_COMMAND_NON_FIP;
+       else if (!(iocbq->iocb_flag &  LPFC_IO_FCP))
+               command_type = ELS_COMMAND_FIP;
+       else if (iocbq->iocb_flag &  LPFC_IO_FCP)
+               command_type = FCP_COMMAND;
+       else {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "2019 Invalid cmd 0x%x\n",
+                       iocbq->iocb.ulpCommand);
+               return IOCB_ERROR;
+       }
+       /* Some of the fields are in the right position already */
+       memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
+       abort_tag = (uint32_t) iocbq->iotag;
+       xritag = iocbq->sli4_xritag;
+       wqe->words[7] = 0; /* The ct field has moved so reset */
+       /* words0-2 bpl convert bde */
+       if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
+               bpl  = (struct ulp_bde64 *)
+                       ((struct lpfc_dmabuf *)iocbq->context3)->virt;
+               if (!bpl)
+                       return IOCB_ERROR;
 
-       psli = &phba->sli;
+               /* Should already be byte swapped. */
+               wqe->generic.bde.addrHigh =  le32_to_cpu(bpl->addrHigh);
+               wqe->generic.bde.addrLow =  le32_to_cpu(bpl->addrLow);
+               /* swap the size field back to the cpu so we
+                * can assign it to the sgl.
+                */
+               wqe->generic.bde.tus.w  = le32_to_cpu(bpl->tus.w);
+               payload_len = wqe->generic.bde.tus.f.bdeSize;
+       } else
+               payload_len = iocbq->iocb.un.fcpi64.bdl.bdeSize;
 
-       /* Adjust cmd/rsp ring iocb entries more evenly */
+       iocbq->iocb.ulpIoTag = iocbq->iotag;
+       cmnd = iocbq->iocb.ulpCommand;
 
-       /* Take some away from the FCP ring */
-       pring = &psli->ring[psli->fcp_ring];
-       pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-       pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-       pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-       pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+       switch (iocbq->iocb.ulpCommand) {
+       case CMD_ELS_REQUEST64_CR:
+               if (!iocbq->iocb.ulpLe) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2007 Only Limited Edition cmd Format"
+                               " supported 0x%x\n",
+                               iocbq->iocb.ulpCommand);
+                       return IOCB_ERROR;
+               }
+               wqe->els_req.payload_len = payload_len;
+               /* Els_request64 has a TMO */
+               bf_set(wqe_tmo, &wqe->els_req.wqe_com,
+                       iocbq->iocb.ulpTimeout);
+               /* Need a VF for word 4; set the vf bit */
+               bf_set(els_req64_vf, &wqe->els_req, 0);
+               /* And a VFID for word 12 */
+               bf_set(els_req64_vfid, &wqe->els_req, 0);
+               /*
+                * Set ct field to 3, which indicates that the context_tag field
+                * contains the FCFI and remote N_Port_ID is
+                * in word 5.
+                */
 
-       /* and give them to the extra ring */
-       pring = &psli->ring[psli->extra_ring];
+               ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+               bf_set(lpfc_wqe_gen_context, &wqe->generic,
+                               iocbq->iocb.ulpContext);
 
-       pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
-       pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
-       pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
-       pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+               if (iocbq->vport->fc_myDID != 0) {
+                       bf_set(els_req64_sid, &wqe->els_req,
+                                iocbq->vport->fc_myDID);
+                       bf_set(els_req64_sp, &wqe->els_req, 1);
+               }
+               bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
+               bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+               /* CCP CCPE PV PRI in word10 were set in the memcpy */
+       break;
+       case CMD_XMIT_SEQUENCE64_CR:
+               /* word3 iocb=io_tag32 wqe=payload_offset */
+               /* payload offset used for multiple outstanding
+                * sequences on the same exchange
+                */
+               wqe->words[3] = 0;
+               /* word4 relative_offset memcpy */
+               /* word5 r_ctl/df_ctl memcpy */
+               bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+               wqe->xmit_sequence.xmit_len = payload_len;
+       break;
+       case CMD_XMIT_BCAST64_CN:
+               /* word3 iocb=iotag32 wqe=payload_len */
+               wqe->words[3] = 0; /* no definition for this in wqe */
+               /* word4 iocb=rsvd wqe=rsvd */
+               /* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
+               /* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
+               bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+       break;
+       case CMD_FCP_IWRITE64_CR:
+               command_type = FCP_COMMAND_DATA_OUT;
+               /* The struct for wqe fcp_iwrite has 3 fields that are somewhat
+                * confusing.
+                * word3 is payload_len: byte offset to the sgl entry for the
+                * fcp_command.
+                * word4 is total xfer len, same as the IOCB->ulpParameter.
+                * word5 is initial xfer len 0 = wait for xfer-ready
+                */
 
-       /* Setup default profile for this ring */
-       pring->iotag_max = 4096;
-       pring->num_mask = 1;
-       pring->prt[0].profile = 0;      /* Mask 0 */
+               /* Always wait for xfer-ready before sending data */
+               wqe->fcp_iwrite.initial_xfer_len = 0;
+               /* word 4 (xfer length) should have been set on the memcpy */
+
+       /* allow write to fall through to read */
+       case CMD_FCP_IREAD64_CR:
+               /* FCP_CMD is always the 1st sgl entry */
+               wqe->fcp_iread.payload_len =
+                       payload_len + sizeof(struct fcp_rsp);
+
+               /* word 4 (xfer length) should have been set on the memcpy */
+
+               bf_set(lpfc_wqe_gen_erp, &wqe->generic,
+                       iocbq->iocb.ulpFCP2Rcvy);
+               bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
+               /* The XC bit and the XS bit are similar. The driver never
+                * tracked whether or not the exchange was previously open.
+                * XC = Exchange create, 0 is create. 1 is already open.
+                * XS = link cmd: 1 do not close the exchange after command.
+                * XS = 0 close exchange when command completes.
+                * The only time we would not set the XC bit is when the XS bit
+                * is set and we are sending our 2nd or greater command on
+                * this exchange.
+                */
+
+       /* ALLOW read & write to fall through to ICMD64 */
+       case CMD_FCP_ICMND64_CR:
+               /* Always open the exchange */
+               bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
+
+               wqe->words[10] &= 0xffff0000; /* zero out ebde count */
+               bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+       break;
+       case CMD_GEN_REQUEST64_CR:
+               /* word3 command length is described as byte offset to the
+                * rsp_data. Would always be 16, sizeof(struct sli4_sge)
+                * sgl[0] = cmnd
+                * sgl[1] = rsp.
+                *
+                */
+               wqe->gen_req.command_len = payload_len;
+               /* Word4 parameter  copied in the memcpy */
+               /* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+               /* word6 context tag copied in memcpy */
+               if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
+                       ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2015 Invalid CT %x command 0x%x\n",
+                               ct, iocbq->iocb.ulpCommand);
+                       return IOCB_ERROR;
+               }
+               bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
+               bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
+                       iocbq->iocb.ulpTimeout);
+
+               bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+               command_type = OTHER_COMMAND;
+       break;
+       case CMD_XMIT_ELS_RSP64_CX:
+               /* words0-2 BDE memcpy */
+               /* word3 iocb=iotag32 wqe=rsvd */
+               wqe->words[3] = 0;
+               /* word4 iocb=did wqe=rsvd. */
+               wqe->words[4] = 0;
+               /* word5 iocb=rsvd wqe=did */
+               bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
+                        iocbq->iocb.un.elsreq64.remoteID);
+
+               bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+
+               bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+               bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+               if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
+                       bf_set(lpfc_wqe_gen_context, &wqe->generic,
+                              iocbq->vport->vpi + phba->vpi_base);
+               command_type = OTHER_COMMAND;
+       break;
+       case CMD_CLOSE_XRI_CN:
+       case CMD_ABORT_XRI_CN:
+       case CMD_ABORT_XRI_CX:
+               /* words 0-2 should be 0 (reserved) after the memcpy */
+               /* port will send abts */
+               if (iocbq->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+                       /*
+                        * The link is down so the fw does not need to send abts
+                        * on the wire.
+                        */
+                       bf_set(abort_cmd_ia, &wqe->abort_cmd, 1);
+               else
+                       bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
+               bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
+               abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+               wqe->words[5] = 0;
+               bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+                       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+               abort_tag = iocbq->iocb.un.acxri.abortIoTag;
+               wqe->generic.abort_tag = abort_tag;
+               /*
+                * The abort handler will send us CMD_ABORT_XRI_CN or
+                * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
+                */
+               bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+               cmnd = CMD_ABORT_XRI_CX;
+               command_type = OTHER_COMMAND;
+               xritag = 0;
+       break;
+       case CMD_XRI_ABORTED_CX:
+       case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
+               /* words0-2 are all 0's no bde */
+               /* word3 and word4 are reserved */
+               wqe->words[3] = 0;
+               wqe->words[4] = 0;
+               /* word5 iocb=rsvd wqe=did */
+               /* There is no remote port id in the IOCB? */
+               /* Let this fall through and fail */
+       case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
+       case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
+       case CMD_FCP_TRSP64_CX: /* Target mode rcv */
+       case CMD_FCP_AUTO_TRSP_CX: /* Auto target rsp */
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2014 Invalid command 0x%x\n",
+                               iocbq->iocb.ulpCommand);
+               return IOCB_ERROR;
+       break;
+
+       }
+       bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
+       bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
+       wqe->generic.abort_tag = abort_tag;
+       bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
+       bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
+       bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
+       bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
+
+       return 0;
+}
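
Nearly every word of the WQE above is built with bf_set(), the driver's
shift-and-mask bit-field accessor defined in the lpfc headers. A minimal
standalone illustration of that set/get idiom; the macro names and field
layout below are invented, not the driver's:

#include <stdint.h>
#include <stdio.h>

/* invented accessors modeling bf_set()/bf_get(): shift-and-mask packing */
#define FIELD_SET(word, shift, mask, val) \
        ((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
                  (((uint32_t)(val) & (mask)) << (shift)))
#define FIELD_GET(word, shift, mask) \
        (((word) >> (shift)) & (mask))

/* invented layout: command opcode in bits 0-7, class in bits 8-10 */
#define CMD_SHIFT   0
#define CMD_MASK    0xffu
#define CLASS_SHIFT 8
#define CLASS_MASK  0x7u

int main(void)
{
        uint32_t word7 = 0;

        FIELD_SET(word7, CMD_SHIFT, CMD_MASK, 0x88);   /* e.g. an abort opcode */
        FIELD_SET(word7, CLASS_SHIFT, CLASS_MASK, 3);  /* e.g. ulpClass */
        printf("cmd=0x%x class=%u\n",
               (unsigned int)FIELD_GET(word7, CMD_SHIFT, CMD_MASK),
               (unsigned int)FIELD_GET(word7, CLASS_SHIFT, CLASS_MASK));
        return 0;
}
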
+
+/**
+ * __lpfc_sli_issue_iocb_s4 - SLI4 device lockless ver of lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * __lpfc_sli_issue_iocb_s4 is used by other functions in the driver to issue
+ * an iocb command to an HBA with SLI-4 interface spec.
+ *
+ * This function is called with hbalock held. The function will return success
+ * after it successfully submits the iocb to firmware or after adding it to the
+ * txq.
+ **/
+static int
+__lpfc_sli_issue_iocb_s4(struct lpfc_hba *phba, uint32_t ring_number,
+                        struct lpfc_iocbq *piocb, uint32_t flag)
+{
+       struct lpfc_sglq *sglq;
+       uint16_t xritag;
+       union lpfc_wqe wqe;
+       struct lpfc_sli_ring *pring = &phba->sli.ring[ring_number];
+       uint32_t fcp_wqidx;
+
+       if (piocb->sli4_xritag == NO_XRI) {
+               if (piocb->iocb.ulpCommand == CMD_ABORT_XRI_CN ||
+                       piocb->iocb.ulpCommand == CMD_CLOSE_XRI_CN)
+                       sglq = NULL;
+               else {
+                       sglq = __lpfc_sli_get_sglq(phba);
+                       if (!sglq)
+                               return IOCB_ERROR;
+                       piocb->sli4_xritag = sglq->sli4_xritag;
+               }
+       } else if (piocb->iocb_flag &  LPFC_IO_FCP) {
+               sglq = NULL; /* These IO's already have an XRI and
+                             * a mapped sgl.
+                             */
+       } else {
+               /* This is a continuation of a command (CX), so this
+                * sglq is on the active list
+                */
+               sglq = __lpfc_get_active_sglq(phba, piocb->sli4_xritag);
+               if (!sglq)
+                       return IOCB_ERROR;
+       }
+
+       if (sglq) {
+               xritag = lpfc_sli4_bpl2sgl(phba, piocb, sglq);
+               if (xritag != sglq->sli4_xritag)
+                       return IOCB_ERROR;
+       }
+
+       if (lpfc_sli4_iocb2wqe(phba, piocb, &wqe))
+               return IOCB_ERROR;
+
+       if (piocb->iocb_flag &  LPFC_IO_FCP) {
+               fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+               if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
+                       return IOCB_ERROR;
+       } else {
+               if (lpfc_sli4_wq_put(phba->sli4_hba.els_wq, &wqe))
+                       return IOCB_ERROR;
+       }
+       lpfc_sli_ringtxcmpl_put(phba, pring, piocb);
+
+       return 0;
+}
+
+/**
+ * __lpfc_sli_issue_iocb - Wrapper func of lockless version for issuing iocb
+ *
+ * This routine dispatches through the lockless IOCB issue function
+ * pointer in the lpfc_hba struct.
+ *
+ * Return codes:
+ *     IOCB_ERROR - Error
+ *     IOCB_SUCCESS - Success
+ *     IOCB_BUSY - Busy
+ **/
+static inline int
+__lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+               struct lpfc_iocbq *piocb, uint32_t flag)
+{
+       return phba->__lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+}
+
+/**
+ * lpfc_sli_api_table_setup - Set up sli api function jump table
+ * @phba: The hba struct for which this call is being executed.
+ * @dev_grp: The HBA PCI-Device group number.
+ *
+ * This routine sets up the SLI interface API function jump table in @phba
+ * struct.
+ * Returns: 0 - success, -ENODEV - failure.
+ **/
+int
+lpfc_sli_api_table_setup(struct lpfc_hba *phba, uint8_t dev_grp)
+{
+
+       switch (dev_grp) {
+       case LPFC_PCI_DEV_LP:
+               phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s3;
+               phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s3;
+               break;
+       case LPFC_PCI_DEV_OC:
+               phba->__lpfc_sli_issue_iocb = __lpfc_sli_issue_iocb_s4;
+               phba->__lpfc_sli_release_iocbq = __lpfc_sli_release_iocbq_s4;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "1419 Invalid HBA PCI-device group: 0x%x\n",
+                               dev_grp);
+               return -ENODEV;
+               break;
+       }
+       phba->lpfc_get_iocb_from_iocbq = lpfc_get_iocb_from_iocbq;
+       return 0;
+}
+
+/**
+ * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
+ * @phba: Pointer to HBA context object.
+ * @ring_number: SLI ring number to issue iocb on.
+ * @piocb: Pointer to command iocb.
+ * @flag: Flag indicating if this command can be put into txq.
+ *
+ * lpfc_sli_issue_iocb is a wrapper around __lpfc_sli_issue_iocb
+ * function. This function gets the hbalock and calls
+ * __lpfc_sli_issue_iocb function and will return the error returned
+ * by __lpfc_sli_issue_iocb function. This wrapper is used by
+ * functions which do not hold hbalock.
+ **/
+int
+lpfc_sli_issue_iocb(struct lpfc_hba *phba, uint32_t ring_number,
+                   struct lpfc_iocbq *piocb, uint32_t flag)
+{
+       unsigned long iflags;
+       int rc;
+
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       return rc;
+}
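
This is the standard locked/lockless pairing: the double-underscore
routine assumes the caller already holds hbalock, and the plain-named
wrapper exists for callers that do not. A compact sketch of the
convention, with a pthread mutex standing in for the spinlock and
illustrative names:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hbalock = PTHREAD_MUTEX_INITIALIZER;

static int __issue(int ring, int tag)   /* caller must hold hbalock */
{
        printf("ring %d: issuing iocb %d\n", ring, tag);
        return 0;
}

static int issue(int ring, int tag)     /* for callers without the lock */
{
        int rc;

        pthread_mutex_lock(&hbalock);
        rc = __issue(ring, tag);
        pthread_mutex_unlock(&hbalock);
        return rc;
}

int main(void)
{
        return issue(0, 42);
}
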
+
+/**
+ * lpfc_extra_ring_setup - Extra ring setup function
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called while the driver attaches to the
+ * HBA to set up the extra ring. The extra ring is used
+ * only when the driver needs to support target mode functionality
+ * or IP over FC functionalities.
+ *
+ * This function is called with no lock held.
+ **/
+static int
+lpfc_extra_ring_setup( struct lpfc_hba *phba)
+{
+       struct lpfc_sli *psli;
+       struct lpfc_sli_ring *pring;
+
+       psli = &phba->sli;
+
+       /* Adjust cmd/rsp ring iocb entries more evenly */
+
+       /* Take some away from the FCP ring */
+       pring = &psli->ring[psli->fcp_ring];
+       pring->numCiocb -= SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+       pring->numRiocb -= SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+       pring->numCiocb -= SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+       pring->numRiocb -= SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+       /* and give them to the extra ring */
+       pring = &psli->ring[psli->extra_ring];
+
+       pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
+       pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
+       pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
+       pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
+
+       /* Setup default profile for this ring */
+       pring->iotag_max = 4096;
+       pring->num_mask = 1;
+       pring->prt[0].profile = 0;      /* Mask 0 */
        pring->prt[0].rctl = phba->cfg_multi_ring_rctl;
        pring->prt[0].type = phba->cfg_multi_ring_type;
        pring->prt[0].lpfc_sli_rcv_unsol_event = NULL;
@@ -4147,6 +6425,52 @@ lpfc_sli_queue_setup(struct lpfc_hba *phba)
        return 1;
 }
 
+/**
+ * lpfc_sli_mbox_sys_flush - Flush mailbox command sub-system
+ * @phba: Pointer to HBA context object.
+ *
+ * This routine flushes the mailbox command subsystem. It will unconditionally
+ * flush all the mailbox commands in the three possible stages in the mailbox
+ * command sub-system: pending mailbox command queue; the outstanding mailbox
+ * command; and the completed mailbox command queue. It is the caller's
+ * to make sure that the driver is in the proper state to flush the mailbox
+ * command sub-system. Namely, the posting of mailbox commands into the
+ * pending mailbox command queue from the various clients must be stopped;
+ * either the HBA is in a state in which it will never work on the outstanding
+ * mailbox command (such as in EEH or ERATT conditions) or the outstanding
+ * mailbox command has been completed.
+ **/
+static void
+lpfc_sli_mbox_sys_flush(struct lpfc_hba *phba)
+{
+       LIST_HEAD(completions);
+       struct lpfc_sli *psli = &phba->sli;
+       LPFC_MBOXQ_t *pmb;
+       unsigned long iflag;
+
+       /* Flush all the mailbox commands in the mbox system */
+       spin_lock_irqsave(&phba->hbalock, iflag);
+       /* The pending mailbox command queue */
+       list_splice_init(&phba->sli.mboxq, &completions);
+       /* The outstanding active mailbox command */
+       if (psli->mbox_active) {
+               list_add_tail(&psli->mbox_active->list, &completions);
+               psli->mbox_active = NULL;
+               psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       }
+       /* The completed mailbox command queue */
+       list_splice_init(&phba->sli.mboxq_cmpl, &completions);
+       spin_unlock_irqrestore(&phba->hbalock, iflag);
+
+       /* Return all flushed mailbox commands with MBX_NOT_FINISHED status */
+       while (!list_empty(&completions)) {
+               list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
+               pmb->u.mb.mbxStatus = MBX_NOT_FINISHED;
+               if (pmb->mbox_cmpl)
+                       pmb->mbox_cmpl(phba, pmb);
+       }
+}
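
The flush routine leans on list_splice_init(): steal every queued entry
onto a private list while holding the lock, then run the completions
with the lock dropped so completion handlers may take locks of their
own. A standalone model of that pattern using a plain singly linked list
(illustrative only; the driver uses the kernel list_head API):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct cmd {
        struct cmd *next;
        int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct cmd *pending;             /* stand-in for phba->sli.mboxq */

static void flush_all(void)
{
        struct cmd *local, *c;

        pthread_mutex_lock(&lock);
        local = pending;                /* "splice": steal the whole list */
        pending = NULL;
        pthread_mutex_unlock(&lock);

        while ((c = local) != NULL) {   /* complete outside the lock */
                local = c->next;
                printf("completing cmd %d with NOT_FINISHED\n", c->id);
                free(c);
        }
}

int main(void)
{
        for (int i = 0; i < 3; i++) {
                struct cmd *c = malloc(sizeof(*c));

                if (!c)
                        return 1;
                c->id = i;
                c->next = pending;
                pending = c;
        }
        flush_all();
        return 0;
}
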
+
 /**
  * lpfc_sli_host_down - Vport cleanup function
  * @vport: Pointer to virtual port object.
@@ -4240,9 +6564,11 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
        struct lpfc_sli *psli = &phba->sli;
        struct lpfc_sli_ring *pring;
        struct lpfc_dmabuf *buf_ptr;
-       LPFC_MBOXQ_t *pmb;
-       int i;
        unsigned long flags = 0;
+       int i;
+
+       /* Shutdown the mailbox command sub-system */
+       lpfc_sli_mbox_sys_shutdown(phba);
 
        lpfc_hba_down_prep(phba);
 
@@ -4287,28 +6613,42 @@ lpfc_sli_hba_down(struct lpfc_hba *phba)
 
        /* Return any active mbox cmds */
        del_timer_sync(&psli->mbox_tmo);
-       spin_lock_irqsave(&phba->hbalock, flags);
 
-       spin_lock(&phba->pport->work_port_lock);
+       spin_lock_irqsave(&phba->pport->work_port_lock, flags);
        phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
-       spin_unlock(&phba->pport->work_port_lock);
+       spin_unlock_irqrestore(&phba->pport->work_port_lock, flags);
 
-       /* Return any pending or completed mbox cmds */
-       list_splice_init(&phba->sli.mboxq, &completions);
-       if (psli->mbox_active) {
-               list_add_tail(&psli->mbox_active->list, &completions);
-               psli->mbox_active = NULL;
-               psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
-       }
-       list_splice_init(&phba->sli.mboxq_cmpl, &completions);
-       spin_unlock_irqrestore(&phba->hbalock, flags);
+       return 1;
+}
+
+/**
+ * lpfc_sli4_hba_down - PCI function resource cleanup for the SLI4 HBA
+ * @phba: Pointer to HBA context object.
+ *
+ * This function cleans up all queues, iocb, buffers, mailbox commands while
+ * shutting down the SLI4 HBA FCoE function. This function is called with no
+ * lock held and always returns 1.
+ *
+ * This function does the following to cleanup driver FCoE function resources:
+ * - Free discovery resources for each virtual port
+ * - Cleanup any pending fabric iocbs
+ * - Iterate through the iocb txq and free each entry in the list.
+ * - Free up any buffer posted to the HBA.
+ * - Clean up all the queue entries: WQ, RQ, MQ, EQ, CQ, etc.
+ * - Free mailbox commands in the mailbox queue.
+ **/
+int
+lpfc_sli4_hba_down(struct lpfc_hba *phba)
+{
+       /* Stop the SLI4 device port */
+       lpfc_stop_port(phba);
+
+       /* Tear down the queues in the HBA */
+       lpfc_sli4_queue_unset(phba);
+
+       /* unregister default FCFI from the HBA */
+       lpfc_sli4_fcfi_unreg(phba, phba->fcf.fcfi);
 
-       while (!list_empty(&completions)) {
-               list_remove_head(&completions, pmb, LPFC_MBOXQ_t, list);
-               pmb->mb.mbxStatus = MBX_NOT_FINISHED;
-               if (pmb->mbox_cmpl)
-                       pmb->mbox_cmpl(phba,pmb);
-       }
        return 1;
 }
 
@@ -4639,7 +6979,10 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
        iabt = &abtsiocbp->iocb;
        iabt->un.acxri.abortType = ABORT_TYPE_ABTS;
        iabt->un.acxri.abortContextTag = icmd->ulpContext;
-       iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               iabt->un.acxri.abortIoTag = cmdiocb->sli4_xritag;
+       else
+               iabt->un.acxri.abortIoTag = icmd->ulpIoTag;
        iabt->ulpLe = 1;
        iabt->ulpClass = icmd->ulpClass;
 
@@ -4655,7 +6998,7 @@ lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
                         "abort cmd iotag x%x\n",
                         iabt->un.acxri.abortContextTag,
                         iabt->un.acxri.abortIoTag, abtsiocbp->iotag);
-       retval = __lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0);
+       retval = __lpfc_sli_issue_iocb(phba, pring->ringno, abtsiocbp, 0);
 
        if (retval)
                __lpfc_sli_release_iocbq(phba, abtsiocbp);
@@ -4838,7 +7181,10 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
                cmd = &iocbq->iocb;
                abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
                abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
-               abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
+               if (phba->sli_rev == LPFC_SLI_REV4)
+                       abtsiocb->iocb.un.acxri.abortIoTag = iocbq->sli4_xritag;
+               else
+                       abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
                abtsiocb->iocb.ulpLe = 1;
                abtsiocb->iocb.ulpClass = cmd->ulpClass;
                abtsiocb->vport = phba->pport;
@@ -4850,7 +7196,8 @@ lpfc_sli_abort_iocb(struct lpfc_vport *vport, struct lpfc_sli_ring *pring,
 
                /* Setup callback routine and issue the command. */
                abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
-               ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
+               ret_val = lpfc_sli_issue_iocb(phba, pring->ringno,
+                                             abtsiocb, 0);
                if (ret_val == IOCB_ERROR) {
                        lpfc_sli_release_iocbq(phba, abtsiocb);
                        errcnt++;
@@ -4931,7 +7278,7 @@ lpfc_sli_wake_iocb_wait(struct lpfc_hba *phba,
  **/
 int
 lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
-                        struct lpfc_sli_ring *pring,
+                        uint32_t ring_number,
                         struct lpfc_iocbq *piocb,
                         struct lpfc_iocbq *prspiocbq,
                         uint32_t timeout)
@@ -4962,7 +7309,7 @@ lpfc_sli_issue_iocb_wait(struct lpfc_hba *phba,
                readl(phba->HCregaddr); /* flush */
        }
 
-       retval = lpfc_sli_issue_iocb(phba, pring, piocb, 0);
+       retval = lpfc_sli_issue_iocb(phba, ring_number, piocb, 0);
        if (retval == IOCB_SUCCESS) {
                timeout_req = timeout * HZ;
                timeleft = wait_event_timeout(done_q,
@@ -5077,140 +7424,267 @@ lpfc_sli_issue_mbox_wait(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq,
 }
 
 /**
- * lpfc_sli_flush_mbox_queue - mailbox queue cleanup function
+ * lpfc_sli_mbox_sys_shutdown - shutdown mailbox command sub-system
  * @phba: Pointer to HBA context.
  *
- * This function is called to cleanup any pending mailbox
- * objects in the driver queue before bringing the HBA offline.
- * This function is called while resetting the HBA.
- * The function is called without any lock held. The function
- * takes hbalock to update SLI data structure.
- * This function returns 1 when there is an active mailbox
- * command pending else returns 0.
+ * This function is called to shutdown the driver's mailbox sub-system.
+ * It first marks the mailbox sub-system as blocked to prevent asynchronous
+ * mailbox commands from being issued off the pending mailbox
+ * command queue. If the mailbox command sub-system shutdown is due to
+ * HBA error conditions such as EEH or ERATT, this routine shall invoke
+ * the mailbox sub-system flush routine to forcefully bring down the
+ * mailbox sub-system. Otherwise, if it is due to normal condition (such
+ * as with offline or HBA function reset), this routine will wait for the
+ * outstanding mailbox command to complete before invoking the mailbox
+ * sub-system flush routine to gracefully bring down mailbox sub-system.
  **/
-int
-lpfc_sli_flush_mbox_queue(struct lpfc_hba * phba)
+void
+lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba)
 {
-       struct lpfc_vport *vport = phba->pport;
-       int i = 0;
-       uint32_t ha_copy;
+       struct lpfc_sli *psli = &phba->sli;
+       uint8_t actcmd = MBX_HEARTBEAT;
+       unsigned long timeout;
 
-       while (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE && !vport->stopped) {
-               if (i++ > LPFC_MBOX_TMO * 1000)
-                       return 1;
+       spin_lock_irq(&phba->hbalock);
+       psli->sli_flag |= LPFC_SLI_ASYNC_MBX_BLK;
+       spin_unlock_irq(&phba->hbalock);
 
-               /*
-                * Call lpfc_sli_handle_mb_event only if a mailbox cmd
-                * did finish. This way we won't get the misleading
-                * "Stray Mailbox Interrupt" message.
-                */
+       if (psli->sli_flag & LPFC_SLI_ACTIVE) {
                spin_lock_irq(&phba->hbalock);
-               ha_copy = phba->work_ha;
-               phba->work_ha &= ~HA_MBATT;
+               if (phba->sli.mbox_active)
+                       actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
                spin_unlock_irq(&phba->hbalock);
-
-               if (ha_copy & HA_MBATT)
-                       if (lpfc_sli_handle_mb_event(phba) == 0)
-                               i = 0;
-
-               msleep(1);
+               /* Determine how long we might wait for the active mailbox
+                * command to be gracefully completed by firmware.
+                */
+               timeout = msecs_to_jiffies(lpfc_mbox_tmo_val(phba, actcmd) *
+                                          1000) + jiffies;
+               while (phba->sli.mbox_active) {
+                       /* Check active mailbox complete status every 2ms */
+                       msleep(2);
+                       if (time_after(jiffies, timeout))
+                               /* Timeout, let the mailbox flush routine to
+                                * forcefully release active mailbox command
+                                */
+                               break;
+               }
        }
-
-       return (phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) ? 1 : 0;
+       lpfc_sli_mbox_sys_flush(phba);
 }
 
 /**
- * lpfc_sli_check_eratt - check error attention events
+ * lpfc_sli_eratt_read - read sli-3 error attention events
  * @phba: Pointer to HBA context.
  *
- * This function is called form timer soft interrupt context to check HBA's
- * error attention register bit for error attention events.
+ * This function is called to read the SLI3 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
  *
 * This function returns 1 when there is Error Attention in the Host Attention
  * Register and returns 0 otherwise.
  **/
-int
-lpfc_sli_check_eratt(struct lpfc_hba *phba)
+static int
+lpfc_sli_eratt_read(struct lpfc_hba *phba)
 {
        uint32_t ha_copy;
 
-       /* If PCI channel is offline, don't process it */
-       if (unlikely(pci_channel_offline(phba->pcidev)))
-               return 0;
-
-       /* If somebody is waiting to handle an eratt, don't process it
-        * here. The brdkill function will do this.
-        */
-       if (phba->link_flag & LS_IGNORE_ERATT)
-               return 0;
-
-       /* Check if interrupt handler handles this ERATT */
-       spin_lock_irq(&phba->hbalock);
-       if (phba->hba_flag & HBA_ERATT_HANDLED) {
-               /* Interrupt handler has handled ERATT */
-               spin_unlock_irq(&phba->hbalock);
-               return 0;
-       }
-
-       /*
-        * If there is deferred error attention, do not check for error
-        * attention
-        */
-       if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-               spin_unlock_irq(&phba->hbalock);
-               return 0;
-       }
-
-       /* Read chip Host Attention (HA) register */
-       ha_copy = readl(phba->HAregaddr);
-       if (ha_copy & HA_ERATT) {
-               /* Read host status register to retrieve error event */
-               lpfc_sli_read_hs(phba);
+       /* Read chip Host Attention (HA) register */
+       ha_copy = readl(phba->HAregaddr);
+       if (ha_copy & HA_ERATT) {
+               /* Read host status register to retrieve error event */
+               lpfc_sli_read_hs(phba);
 
                /* Check if a deferred error condition is active */
                if ((HS_FFER1 & phba->work_hs) &&
-                       ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
-                       HS_FFER6 | HS_FFER7) & phba->work_hs)) {
+                   ((HS_FFER2 | HS_FFER3 | HS_FFER4 | HS_FFER5 |
+                    HS_FFER6 | HS_FFER7) & phba->work_hs)) {
+                       spin_lock_irq(&phba->hbalock);
                        phba->hba_flag |= DEFER_ERATT;
+                       spin_unlock_irq(&phba->hbalock);
                        /* Clear all interrupt enable conditions */
                        writel(0, phba->HCregaddr);
                        readl(phba->HCregaddr);
                }
 
                /* Set the driver HA work bitmap */
+               spin_lock_irq(&phba->hbalock);
                phba->work_ha |= HA_ERATT;
                /* Indicate polling handles this ERATT */
                phba->hba_flag |= HBA_ERATT_HANDLED;
                spin_unlock_irq(&phba->hbalock);
                return 1;
        }
+       return 0;
+}
+
+/**
+ * lpfc_sli4_eratt_read - read sli-4 error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called to read the SLI4 device error attention registers
+ * for possible error attention events. The caller must hold the hostlock
+ * with spin_lock_irq().
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+static int
+lpfc_sli4_eratt_read(struct lpfc_hba *phba)
+{
+       uint32_t uerr_sta_hi, uerr_sta_lo;
+       uint32_t onlnreg0, onlnreg1;
+
+       /* For now, use the SLI4 device internal unrecoverable error
+        * registers for error attention. This can be changed later.
+        */
+       onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+       onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+       if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+               uerr_sta_lo = readl(phba->sli4_hba.UERRLOregaddr);
+               uerr_sta_hi = readl(phba->sli4_hba.UERRHIregaddr);
+               if (uerr_sta_lo || uerr_sta_hi) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                                       "1423 HBA Unrecoverable error: "
+                                       "uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+                                       "online0_reg=0x%x, online1_reg=0x%x\n",
+                                       uerr_sta_lo, uerr_sta_hi,
+                                       onlnreg0, onlnreg1);
+                       /* TEMP: while the driver error recovery logic is
+                        * not fully developed, just log the error message;
+                        * the device error attention action is temporarily
+                        * disabled.
+                        */
+                       return 0;
+                       phba->work_status[0] = uerr_sta_lo;
+                       phba->work_status[1] = uerr_sta_hi;
+                       spin_lock_irq(&phba->hbalock);
+                       /* Set the driver HA work bitmap */
+                       phba->work_ha |= HA_ERATT;
+                       /* Indicate polling handles this ERATT */
+                       phba->hba_flag |= HBA_ERATT_HANDLED;
+                       spin_unlock_irq(&phba->hbalock);
+                       return 1;
+               }
+       }
+       return 0;
+}
+
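+/* Hedged sketch of the MMIO error-latch test used above: the two "online"
+ * registers are compared against the no-error sentinel before the UERR
+ * status pair is read. Register names mirror the driver;
+ * example_uerr_pending() is a hypothetical wrapper added for illustration.
+ */
+static bool example_uerr_pending(struct lpfc_hba *phba)
+{
+       uint32_t onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+       uint32_t onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+
+       return (onlnreg0 != LPFC_ONLINE_NERR) ||
+              (onlnreg1 != LPFC_ONLINE_NERR);
+}
+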
+/**
+ * lpfc_sli_check_eratt - check error attention events
+ * @phba: Pointer to HBA context.
+ *
+ * This function is called from timer soft interrupt context to check HBA's
+ * error attention register bit for error attention events.
+ *
+ * This function returns 1 when there is Error Attention in the Host Attention
+ * Register and returns 0 otherwise.
+ **/
+int
+lpfc_sli_check_eratt(struct lpfc_hba *phba)
+{
+       uint32_t ha_copy;
+
+       /* If somebody is waiting to handle an eratt, don't process it
+        * here. The brdkill function will do this.
+        */
+       if (phba->link_flag & LS_IGNORE_ERATT)
+               return 0;
+
+       /* Check if interrupt handler handles this ERATT */
+       spin_lock_irq(&phba->hbalock);
+       if (phba->hba_flag & HBA_ERATT_HANDLED) {
+               /* Interrupt handler has handled ERATT */
+               spin_unlock_irq(&phba->hbalock);
+               return 0;
+       }
+
+       /*
+        * If there is deferred error attention, do not check for error
+        * attention
+        */
+       if (unlikely(phba->hba_flag & DEFER_ERATT)) {
+               spin_unlock_irq(&phba->hbalock);
+               return 0;
+       }
+
+       /* If PCI channel is offline, don't process it */
+       if (unlikely(pci_channel_offline(phba->pcidev))) {
+               spin_unlock_irq(&phba->hbalock);
+               return 0;
+       }
+
+       switch (phba->sli_rev) {
+       case LPFC_SLI_REV2:
+       case LPFC_SLI_REV3:
+               /* Read chip Host Attention (HA) register */
+               ha_copy = lpfc_sli_eratt_read(phba);
+               break;
+       case LPFC_SLI_REV4:
+               /* Read device Unrecoverable Error (UERR) registers */
+               ha_copy = lpfc_sli4_eratt_read(phba);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0299 Invalid SLI revision (%d)\n",
+                               phba->sli_rev);
+               ha_copy = 0;
+               break;
+       }
        spin_unlock_irq(&phba->hbalock);
+
+       return ha_copy;
+}
+
+/**
+ * lpfc_intr_state_check - Check device state for interrupt handling
+ * @phba: Pointer to HBA context.
+ *
+ * This inline routine checks whether the device or its PCI slot is in a
+ * state in which the interrupt should be handled.
+ *
+ * This function returns 0 if the device or the PCI slot is in a state in
+ * which the interrupt should be handled, otherwise -EIO.
+ */
+static inline int
+lpfc_intr_state_check(struct lpfc_hba *phba)
+{
+       /* If the pci channel is offline, ignore all the interrupts */
+       if (unlikely(pci_channel_offline(phba->pcidev)))
+               return -EIO;
+
+       /* Update device level interrupt statistics */
+       phba->sli.slistat.sli_intr++;
+
+       /* Ignore all interrupts during initialization. */
+       if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+               return -EIO;
+
        return 0;
 }
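
+/* Hedged usage sketch: an ISR gating on the shared state check, mirroring
+ * the real handlers below; example_isr() itself is hypothetical.
+ */
+static irqreturn_t example_isr(int irq, void *dev_id)
+{
+       struct lpfc_hba *phba = dev_id;
+
+       if (lpfc_intr_state_check(phba))
+               return IRQ_NONE;
+       /* ... service the hardware events here ... */
+       return IRQ_HANDLED;
+}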
 
 /**
- * lpfc_sp_intr_handler - The slow-path interrupt handler of lpfc driver
+ * lpfc_sli_sp_intr_handler - Slow-path interrupt handler to SLI-3 device
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
  * This function is directly called from the PCI layer as an interrupt
- * service routine when the device is enabled with MSI-X multi-message
- * interrupt mode and there are slow-path events in the HBA. However,
- * when the device is enabled with either MSI or Pin-IRQ interrupt mode,
- * this function is called as part of the device-level interrupt handler.
- * When the PCI slot is in error recovery or the HBA is undergoing
- * initialization, the interrupt handler will not process the interrupt.
- * The link attention and ELS ring attention events are handled by the
- * worker thread. The interrupt handler signals the worker thread and
- * and returns for these events. This function is called without any
- * lock held. It gets the hbalock to access and update SLI data
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA
+ * is undergoing initialization, the interrupt handler will not process
+ * the interrupt. The link attention and ELS ring attention events are
+ * handled by the worker thread. The interrupt handler signals the worker
+ * thread and returns for these events. This function is called without
+ * any lock held. It gets the hbalock to access and update SLI data
  * structures.
  *
  * This function returns IRQ_HANDLED when interrupt is handled else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_sp_intr_handler(int irq, void *dev_id)
+lpfc_sli_sp_intr_handler(int irq, void *dev_id)
 {
        struct lpfc_hba  *phba;
        uint32_t ha_copy;
@@ -5240,13 +7714,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
         * individual interrupt handler in MSI-X multi-message interrupt mode
         */
        if (phba->intr_type == MSIX) {
-               /* If the pci channel is offline, ignore all the interrupts */
-               if (unlikely(pci_channel_offline(phba->pcidev)))
-                       return IRQ_NONE;
-               /* Update device-level interrupt statistics */
-               phba->sli.slistat.sli_intr++;
-               /* Ignore all interrupts during initialization. */
-               if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+               /* Check device state for handling interrupt */
+               if (lpfc_intr_state_check(phba))
                        return IRQ_NONE;
                /* Need to read HA REG for slow-path events */
                spin_lock_irqsave(&phba->hbalock, iflag);
@@ -5271,7 +7740,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
                 * interrupt.
                 */
                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-                       spin_unlock_irq(&phba->hbalock);
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
                        return IRQ_NONE;
                }
 
@@ -5364,7 +7833,7 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
 
                if ((work_ha_copy & HA_MBATT) && (phba->sli.mbox_active)) {
                        pmb = phba->sli.mbox_active;
-                       pmbox = &pmb->mb;
+                       pmbox = &pmb->u.mb;
                        mbox = phba->mbox;
                        vport = pmb->vport;
 
@@ -5434,7 +7903,8 @@ lpfc_sp_intr_handler(int irq, void *dev_id)
                                                        LOG_MBOX | LOG_SLI,
                                                        "0350 rc should have"
                                                        "been MBX_BUSY");
-                                               goto send_current_mbox;
+                                               if (rc != MBX_NOT_FINISHED)
+                                                       goto send_current_mbox;
                                        }
                                }
                                spin_lock_irqsave(
@@ -5471,29 +7941,29 @@ send_current_mbox:
        }
        return IRQ_HANDLED;
 
-} /* lpfc_sp_intr_handler */
+} /* lpfc_sli_sp_intr_handler */
 
 /**
- * lpfc_fp_intr_handler - The fast-path interrupt handler of lpfc driver
+ * lpfc_sli_fp_intr_handler - Fast-path interrupt handler to SLI-3 device.
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
  * This function is directly called from the PCI layer as an interrupt
- * service routine when the device is enabled with MSI-X multi-message
- * interrupt mode and there is a fast-path FCP IOCB ring event in the
- * HBA. However, when the device is enabled with either MSI or Pin-IRQ
- * interrupt mode, this function is called as part of the device-level
- * interrupt handler. When the PCI slot is in error recovery or the HBA
- * is undergoing initialization, the interrupt handler will not process
- * the interrupt. The SCSI FCP fast-path ring event are handled in the
- * intrrupt context. This function is called without any lock held. It
- * gets the hbalock to access and update SLI data structures.
+ * service routine when device with SLI-3 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures.
  *
  * This function returns IRQ_HANDLED when interrupt is handled else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_fp_intr_handler(int irq, void *dev_id)
+lpfc_sli_fp_intr_handler(int irq, void *dev_id)
 {
        struct lpfc_hba  *phba;
        uint32_t ha_copy;
@@ -5513,13 +7983,8 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
         * individual interrupt handler in MSI-X multi-message interrupt mode
         */
        if (phba->intr_type == MSIX) {
-               /* If pci channel is offline, ignore all the interrupts */
-               if (unlikely(pci_channel_offline(phba->pcidev)))
-                       return IRQ_NONE;
-               /* Update device-level interrupt statistics */
-               phba->sli.slistat.sli_intr++;
-               /* Ignore all interrupts during initialization. */
-               if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+               /* Check device state for handling interrupt */
+               if (lpfc_intr_state_check(phba))
                        return IRQ_NONE;
                /* Need to read HA REG for FCP ring and other ring events */
                ha_copy = readl(phba->HAregaddr);
@@ -5530,7 +7995,7 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
                 * any interrupt.
                 */
                if (unlikely(phba->hba_flag & DEFER_ERATT)) {
-                       spin_unlock_irq(&phba->hbalock);
+                       spin_unlock_irqrestore(&phba->hbalock, iflag);
                        return IRQ_NONE;
                }
                writel((ha_copy & (HA_R0_CLR_MSK | HA_R1_CLR_MSK)),
@@ -5566,26 +8031,27 @@ lpfc_fp_intr_handler(int irq, void *dev_id)
                }
        }
        return IRQ_HANDLED;
-}  /* lpfc_fp_intr_handler */
+}  /* lpfc_sli_fp_intr_handler */
 
 /**
- * lpfc_intr_handler - The device-level interrupt handler of lpfc driver
+ * lpfc_sli_intr_handler - Device-level interrupt handler to SLI-3 device
  * @irq: Interrupt number.
  * @dev_id: The device context pointer.
  *
- * This function is the device-level interrupt handler called from the PCI
- * layer when either MSI or Pin-IRQ interrupt mode is enabled and there is
- * an event in the HBA which requires driver attention. This function
- * invokes the slow-path interrupt attention handling function and fast-path
- * interrupt attention handling function in turn to process the relevant
- * HBA attention events. This function is called without any lock held. It
- * gets the hbalock to access and update SLI data structures.
+ * This function is the HBA device-level interrupt handler to device with
+ * SLI-3 interface spec, called from the PCI layer when either MSI or
+ * Pin-IRQ interrupt mode is enabled and there is an event in the HBA which
+ * requires driver attention. This function invokes the slow-path interrupt
+ * attention handling function and fast-path interrupt attention handling
+ * function in turn to process the relevant HBA attention events. This
+ * function is called without any lock held. It gets the hbalock to access
+ * and update SLI data structures.
  *
  * This function returns IRQ_HANDLED when interrupt is handled, else it
  * returns IRQ_NONE.
  **/
 irqreturn_t
-lpfc_intr_handler(int irq, void *dev_id)
+lpfc_sli_intr_handler(int irq, void *dev_id)
 {
        struct lpfc_hba  *phba;
        irqreturn_t sp_irq_rc, fp_irq_rc;
@@ -5600,15 +8066,8 @@ lpfc_intr_handler(int irq, void *dev_id)
        if (unlikely(!phba))
                return IRQ_NONE;
 
-       /* If the pci channel is offline, ignore all the interrupts. */
-       if (unlikely(pci_channel_offline(phba->pcidev)))
-               return IRQ_NONE;
-
-       /* Update device level interrupt statistics */
-       phba->sli.slistat.sli_intr++;
-
-       /* Ignore all interrupts during initialization. */
-       if (unlikely(phba->link_state < LPFC_LINK_DOWN))
+       /* Check device state for handling interrupt */
+       if (lpfc_intr_state_check(phba))
                return IRQ_NONE;
 
        spin_lock(&phba->hbalock);
@@ -5650,7 +8109,7 @@ lpfc_intr_handler(int irq, void *dev_id)
        status2 >>= (4*LPFC_ELS_RING);
 
        if (status1 || (status2 & HA_RXMASK))
-               sp_irq_rc = lpfc_sp_intr_handler(irq, dev_id);
+               sp_irq_rc = lpfc_sli_sp_intr_handler(irq, dev_id);
        else
                sp_irq_rc = IRQ_NONE;
 
@@ -5670,10 +8129,3322 @@ lpfc_intr_handler(int irq, void *dev_id)
                status2 = 0;
 
        if ((status1 & HA_RXMASK) || (status2 & HA_RXMASK))
-               fp_irq_rc = lpfc_fp_intr_handler(irq, dev_id);
+               fp_irq_rc = lpfc_sli_fp_intr_handler(irq, dev_id);
        else
                fp_irq_rc = IRQ_NONE;
 
        /* Return device-level interrupt handling status */
        return (sp_irq_rc == IRQ_HANDLED) ? sp_irq_rc : fp_irq_rc;
-}  /* lpfc_intr_handler */
+}  /* lpfc_sli_intr_handler */
+
+/**
+ * lpfc_sli4_fcp_xri_abort_event_proc - Process fcp xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 FCP abort XRI events.
+ **/
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event;
+
+       /* First, mark the fcp xri abort event as handled */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~FCP_XRI_ABORT_EVENT;
+       spin_unlock_irq(&phba->hbalock);
+       /* Now, handle all the fcp xri abort events */
+       while (!list_empty(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue)) {
+               /* Get the first event from the head of the event queue */
+               spin_lock_irq(&phba->hbalock);
+               list_remove_head(&phba->sli4_hba.sp_fcp_xri_aborted_work_queue,
+                                cq_event, struct lpfc_cq_event, list);
+               spin_unlock_irq(&phba->hbalock);
+               /* Notify aborted XRI for FCP work queue */
+               lpfc_sli4_fcp_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+               /* Free the event processed back to the free pool */
+               lpfc_sli4_cq_event_release(phba, cq_event);
+       }
+}
+
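+/* Hedged sketch of the drain pattern used above, in plain list-API terms:
+ * pop one event at a time under the lock, then process it unlocked.
+ * handle_event() is a hypothetical consumer standing in for
+ * lpfc_sli4_fcp_xri_aborted() plus lpfc_sli4_cq_event_release().
+ */
+static void example_drain_events(struct lpfc_hba *phba, struct list_head *q)
+{
+       struct lpfc_cq_event *cq_event;
+
+       while (!list_empty(q)) {
+               spin_lock_irq(&phba->hbalock);
+               cq_event = list_first_entry(q, struct lpfc_cq_event, list);
+               list_del_init(&cq_event->list);
+               spin_unlock_irq(&phba->hbalock);
+               handle_event(cq_event);         /* hypothetical consumer */
+       }
+}
+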
+/**
+ * lpfc_sli4_els_xri_abort_event_proc - Process els xri abort event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked by the worker thread to process all the pending
+ * SLI4 els abort xri events.
+ **/
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *phba)
+{
+       struct lpfc_cq_event *cq_event;
+
+       /* First, mark the els xri abort event as handled */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~ELS_XRI_ABORT_EVENT;
+       spin_unlock_irq(&phba->hbalock);
+       /* Now, handle all the els xri abort events */
+       while (!list_empty(&phba->sli4_hba.sp_els_xri_aborted_work_queue)) {
+               /* Get the first event from the head of the event queue */
+               spin_lock_irq(&phba->hbalock);
+               list_remove_head(&phba->sli4_hba.sp_els_xri_aborted_work_queue,
+                                cq_event, struct lpfc_cq_event, list);
+               spin_unlock_irq(&phba->hbalock);
+               /* Notify aborted XRI for ELS work queue */
+               lpfc_sli4_els_xri_aborted(phba, &cq_event->cqe.wcqe_axri);
+               /* Free the event processed back to the free pool */
+               lpfc_sli4_cq_event_release(phba, cq_event);
+       }
+}
+
+static void
+lpfc_sli4_iocb_param_transfer(struct lpfc_iocbq *pIocbIn,
+                             struct lpfc_iocbq *pIocbOut,
+                             struct lpfc_wcqe_complete *wcqe)
+{
+       size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
+       memcpy((char *)pIocbIn + offset, (char *)pIocbOut + offset,
+              sizeof(struct lpfc_iocbq) - offset);
+       memset(&pIocbIn->sli4_info, 0,
+              sizeof(struct lpfc_sli4_rspiocb_info));
+       /* Map WCQE parameters into irspiocb parameters */
+       pIocbIn->iocb.ulpStatus = bf_get(lpfc_wcqe_c_status, wcqe);
+       if (pIocbOut->iocb_flag & LPFC_IO_FCP) {
+               if (pIocbIn->iocb.ulpStatus == IOSTAT_FCP_RSP_ERROR)
+                       pIocbIn->iocb.un.fcpi.fcpi_parm =
+                                       pIocbOut->iocb.un.fcpi.fcpi_parm -
+                                       wcqe->total_data_placed;
+               else
+                       pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+       } else {
+               pIocbIn->iocb.un.ulpWord[4] = wcqe->parameter;
+       }
+       /* Load in additional WCQE parameters */
+       pIocbIn->sli4_info.hw_status = bf_get(lpfc_wcqe_c_hw_status, wcqe);
+       pIocbIn->sli4_info.bfield = 0;
+       if (bf_get(lpfc_wcqe_c_xb, wcqe))
+               pIocbIn->sli4_info.bfield |= LPFC_XB;
+       if (bf_get(lpfc_wcqe_c_pv, wcqe)) {
+               pIocbIn->sli4_info.bfield |= LPFC_PV;
+               pIocbIn->sli4_info.priority =
+                                       bf_get(lpfc_wcqe_c_priority, wcqe);
+       }
+}
+
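+/* Hedged sketch of the offsetof-based partial copy performed above:
+ * everything from member `iocb` to the end of the structure is copied in
+ * one memcpy, leaving the fields before `iocb` (list heads, flags)
+ * untouched in the destination.
+ */
+static void example_partial_copy(struct lpfc_iocbq *dst,
+                                const struct lpfc_iocbq *src)
+{
+       size_t offset = offsetof(struct lpfc_iocbq, iocb);
+
+       memcpy((char *)dst + offset, (const char *)src + offset,
+              sizeof(struct lpfc_iocbq) - offset);
+}
+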
+/**
+ * lpfc_sli4_sp_handle_async_event - Handle an asynchronous event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry with an
+ * asynchronous event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_async_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+       struct lpfc_cq_event *cq_event;
+       unsigned long iflags;
+
+       lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
+                       "0392 Async Event: word0:x%x, word1:x%x, "
+                       "word2:x%x, word3:x%x\n", mcqe->word0,
+                       mcqe->mcqe_tag0, mcqe->mcqe_tag1, mcqe->trailer);
+
+       /* Allocate a new internal CQ_EVENT entry */
+       cq_event = lpfc_sli4_cq_event_alloc(phba);
+       if (!cq_event) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0394 Failed to allocate CQ_EVENT entry\n");
+               return false;
+       }
+
+       /* Move the CQE into an asynchronous event entry */
+       memcpy(&cq_event->cqe, mcqe, sizeof(struct lpfc_mcqe));
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       list_add_tail(&cq_event->list, &phba->sli4_hba.sp_asynce_work_queue);
+       /* Set the async event flag */
+       phba->hba_flag |= ASYNC_EVENT;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       return true;
+}
+
+/**
+ * lpfc_sli4_sp_handle_mbox_event - Handle a mailbox completion event
+ * @phba: Pointer to HBA context object.
+ * @mcqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry with a mailbox
+ * completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mbox_event(struct lpfc_hba *phba, struct lpfc_mcqe *mcqe)
+{
+       uint32_t mcqe_status;
+       MAILBOX_t *mbox, *pmbox;
+       struct lpfc_mqe *mqe;
+       struct lpfc_vport *vport;
+       struct lpfc_nodelist *ndlp;
+       struct lpfc_dmabuf *mp;
+       unsigned long iflags;
+       LPFC_MBOXQ_t *pmb;
+       bool workposted = false;
+       int rc;
+
+       /* If this is not a mailbox-complete MCQE, bail out after checking
+        * whether the mailbox was consumed.
+        */
+       if (!bf_get(lpfc_trailer_completed, mcqe))
+               goto out_no_mqe_complete;
+
+       /* Get the reference to the active mbox command */
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       pmb = phba->sli.mbox_active;
+       if (unlikely(!pmb)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "1832 No pending MBOX command to handle\n");
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               goto out_no_mqe_complete;
+       }
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       mqe = &pmb->u.mqe;
+       pmbox = (MAILBOX_t *)&pmb->u.mqe;
+       mbox = phba->mbox;
+       vport = pmb->vport;
+
+       /* Reset heartbeat timer */
+       phba->last_completion_time = jiffies;
+       del_timer(&phba->sli.mbox_tmo);
+
+       /* Move mbox data to caller's mailbox region, do endian swapping */
+       if (pmb->mbox_cmpl && mbox)
+               lpfc_sli_pcimem_bcopy(mbox, mqe, sizeof(struct lpfc_mqe));
+       /* Set the mailbox status with SLI4 range 0x4000 */
+       mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+       if (mcqe_status != MB_CQE_STATUS_SUCCESS)
+               bf_set(lpfc_mqe_status, mqe,
+                      (LPFC_MBX_ERROR_RANGE | mcqe_status));
+
+       if (pmb->mbox_flag & LPFC_MBX_IMED_UNREG) {
+               pmb->mbox_flag &= ~LPFC_MBX_IMED_UNREG;
+               lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_MBOX_VPORT,
+                                     "MBOX dflt rpi: status:x%x rpi:x%x",
+                                     mcqe_status,
+                                     pmbox->un.varWords[0], 0);
+               if (mcqe_status == MB_CQE_STATUS_SUCCESS) {
+                       mp = (struct lpfc_dmabuf *)(pmb->context1);
+                       ndlp = (struct lpfc_nodelist *)pmb->context2;
+                       /* Reg_LOGIN of dflt RPI was successful. Now let's
+                        * get rid of the RPI using the same mbox buffer.
+                        */
+                       lpfc_unreg_login(phba, vport->vpi,
+                                        pmbox->un.varWords[0], pmb);
+                       pmb->mbox_cmpl = lpfc_mbx_cmpl_dflt_rpi;
+                       pmb->context1 = mp;
+                       pmb->context2 = ndlp;
+                       pmb->vport = vport;
+                       rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+                       if (rc != MBX_BUSY)
+                               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX |
+                                               LOG_SLI, "0385 rc should "
+                                               "have been MBX_BUSY\n");
+                       if (rc != MBX_NOT_FINISHED)
+                               goto send_current_mbox;
+               }
+       }
+       spin_lock_irqsave(&phba->pport->work_port_lock, iflags);
+       phba->pport->work_port_events &= ~WORKER_MBOX_TMO;
+       spin_unlock_irqrestore(&phba->pport->work_port_lock, iflags);
+
+       /* There is mailbox completion work to do */
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       __lpfc_mbox_cmpl_put(phba, pmb);
+       phba->work_ha |= HA_MBATT;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       workposted = true;
+
+send_current_mbox:
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       /* Release the mailbox command posting token */
+       phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
+       /* Clearing the active mailbox pointer must stay in sync with the
+        * flag clear */
+       phba->sli.mbox_active = NULL;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       /* Wake up worker thread to post the next pending mailbox command */
+       lpfc_worker_wake_up(phba);
+out_no_mqe_complete:
+       if (bf_get(lpfc_trailer_consumed, mcqe))
+               lpfc_sli4_mq_release(phba->sli4_hba.mbx_wq);
+       return workposted;
+}
+
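+/* Hedged sketch of the bit-field accessor pattern above: bf_get()/bf_set()
+ * are the driver's macros for reading and writing named fields inside
+ * hardware queue entries. This mirrors the status mapping in the handler;
+ * both pointers are assumed valid.
+ */
+static void example_map_mbox_status(struct lpfc_mqe *mqe,
+                                   struct lpfc_mcqe *mcqe)
+{
+       uint32_t mcqe_status = bf_get(lpfc_mcqe_status, mcqe);
+
+       /* Fold a CQE failure into the SLI4 mailbox error range */
+       if (mcqe_status != MB_CQE_STATUS_SUCCESS)
+               bf_set(lpfc_mqe_status, mqe,
+                      (LPFC_MBX_ERROR_RANGE | mcqe_status));
+}
+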
+/**
+ * lpfc_sli4_sp_handle_mcqe - Process a mailbox completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to mailbox completion queue entry.
+ *
+ * This routine processes a mailbox completion queue entry; it invokes the
+ * proper mailbox-complete or asynchronous-event handling routine
+ * according to the MCQE's async bit.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_mcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+       struct lpfc_mcqe mcqe;
+       bool workposted;
+
+       /* Copy the mailbox MCQE and convert endian order as needed */
+       lpfc_sli_pcimem_bcopy(cqe, &mcqe, sizeof(struct lpfc_mcqe));
+
+       /* Invoke the proper event handling routine */
+       if (!bf_get(lpfc_trailer_async, &mcqe))
+               workposted = lpfc_sli4_sp_handle_mbox_event(phba, &mcqe);
+       else
+               workposted = lpfc_sli4_sp_handle_async_event(phba, &mcqe);
+       return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_els_wcqe - Handle els work-queue completion event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an ELS work-queue completion event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_els_wcqe(struct lpfc_hba *phba,
+                            struct lpfc_wcqe_complete *wcqe)
+{
+       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+       struct lpfc_iocbq *cmdiocbq;
+       struct lpfc_iocbq *irspiocbq;
+       unsigned long iflags;
+       bool workposted = false;
+
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       pring->stats.iocb_event++;
+       /* Look up the ELS command IOCB and create pseudo response IOCB */
+       cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+                               bf_get(lpfc_wcqe_c_request_tag, wcqe));
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       if (unlikely(!cmdiocbq)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "0386 ELS complete with no corresponding "
+                               "cmdiocb: iotag (%d)\n",
+                               bf_get(lpfc_wcqe_c_request_tag, wcqe));
+               return workposted;
+       }
+
+       /* Fake the irspiocbq and copy necessary response information */
+       irspiocbq = lpfc_sli_get_iocbq(phba);
+       if (!irspiocbq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0387 Failed to allocate an iocbq\n");
+               return workposted;
+       }
+       lpfc_sli4_iocb_param_transfer(irspiocbq, cmdiocbq, wcqe);
+
+       /* Add the irspiocb to the response IOCB work list */
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       list_add_tail(&irspiocbq->list, &phba->sli4_hba.sp_rspiocb_work_queue);
+       /* Indicate ELS ring attention */
+       phba->work_ha |= (HA_R0ATT << (4*LPFC_ELS_RING));
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       workposted = true;
+
+       return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rel_wcqe - Handle slow-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a slow-path WQ entry consumed event by invoking the
+ * proper WQ release routine for the slow-path WQ.
+ **/
+static void
+lpfc_sli4_sp_handle_rel_wcqe(struct lpfc_hba *phba,
+                            struct lpfc_wcqe_release *wcqe)
+{
+       /* Check for the slow-path ELS work queue */
+       if (bf_get(lpfc_wcqe_r_wq_id, wcqe) == phba->sli4_hba.els_wq->queue_id)
+               lpfc_sli4_wq_release(phba->sli4_hba.els_wq,
+                                    bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+       else
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "2579 Slow-path wqe consume event carries "
+                               "mismatched qid: wcqe-qid=x%x, sp-qid=x%x\n",
+                               bf_get(lpfc_wcqe_r_wqe_index, wcqe),
+                               phba->sli4_hba.els_wq->queue_id);
+}
+
+/**
+ * lpfc_sli4_sp_handle_abort_xri_wcqe - Handle an XRI abort event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to a WQ completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles an XRI abort event.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_abort_xri_wcqe(struct lpfc_hba *phba,
+                                  struct lpfc_queue *cq,
+                                  struct sli4_wcqe_xri_aborted *wcqe)
+{
+       bool workposted = false;
+       struct lpfc_cq_event *cq_event;
+       unsigned long iflags;
+
+       /* Allocate a new internal CQ_EVENT entry */
+       cq_event = lpfc_sli4_cq_event_alloc(phba);
+       if (!cq_event) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0602 Failed to allocate CQ_EVENT entry\n");
+               return false;
+       }
+
+       /* Move the CQE into the proper xri abort event list */
+       memcpy(&cq_event->cqe, wcqe, sizeof(struct sli4_wcqe_xri_aborted));
+       switch (cq->subtype) {
+       case LPFC_FCP:
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               list_add_tail(&cq_event->list,
+                             &phba->sli4_hba.sp_fcp_xri_aborted_work_queue);
+               /* Set the fcp xri abort event flag */
+               phba->hba_flag |= FCP_XRI_ABORT_EVENT;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               workposted = true;
+               break;
+       case LPFC_ELS:
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               list_add_tail(&cq_event->list,
+                             &phba->sli4_hba.sp_els_xri_aborted_work_queue);
+               /* Set the els xri abort event flag */
+               phba->hba_flag |= ELS_XRI_ABORT_EVENT;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               workposted = true;
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0603 Invalid work queue CQE subtype (x%x)\n",
+                               cq->subtype);
+               workposted = false;
+               break;
+       }
+       return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_wcqe - Process a work-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to a completion queue entry.
+ *
+ * This routine processes a slow-path work-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+                        struct lpfc_cqe *cqe)
+{
+       struct lpfc_wcqe_complete wcqe;
+       bool workposted = false;
+
+       /* Copy the work queue CQE and convert endian order if needed */
+       lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+       /* Check and process for different type of WCQE and dispatch */
+       switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+       case CQE_CODE_COMPL_WQE:
+               /* Process the WQ complete event */
+               workposted = lpfc_sli4_sp_handle_els_wcqe(phba,
+                                       (struct lpfc_wcqe_complete *)&wcqe);
+               break;
+       case CQE_CODE_RELEASE_WQE:
+               /* Process the WQ release event */
+               lpfc_sli4_sp_handle_rel_wcqe(phba,
+                                       (struct lpfc_wcqe_release *)&wcqe);
+               break;
+       case CQE_CODE_XRI_ABORTED:
+               /* Process the WQ XRI abort event */
+               workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+                                       (struct sli4_wcqe_xri_aborted *)&wcqe);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0388 Not a valid WCQE code: x%x\n",
+                               bf_get(lpfc_wcqe_c_code, &wcqe));
+               break;
+       }
+       return workposted;
+}
+
+/**
+ * lpfc_sli4_sp_handle_rcqe - Process a receive-queue completion queue entry
+ * @phba: Pointer to HBA context object.
+ * @cqe: Pointer to receive-queue completion queue entry.
+ *
+ * This routine processes a receive-queue completion queue entry.
+ *
+ * Return: true if work posted to worker thread, otherwise false.
+ **/
+static bool
+lpfc_sli4_sp_handle_rcqe(struct lpfc_hba *phba, struct lpfc_cqe *cqe)
+{
+       struct lpfc_rcqe rcqe;
+       bool workposted = false;
+       struct lpfc_queue *hrq = phba->sli4_hba.hdr_rq;
+       struct lpfc_queue *drq = phba->sli4_hba.dat_rq;
+       struct hbq_dmabuf *dma_buf;
+       uint32_t status;
+       unsigned long iflags;
+
+       /* Copy the receive queue CQE and convert endian order if needed */
+       lpfc_sli_pcimem_bcopy(cqe, &rcqe, sizeof(struct lpfc_rcqe));
+       lpfc_sli4_rq_release(hrq, drq);
+       if (bf_get(lpfc_rcqe_code, &rcqe) != CQE_CODE_RECEIVE)
+               goto out;
+       if (bf_get(lpfc_rcqe_rq_id, &rcqe) != hrq->queue_id)
+               goto out;
+
+       status = bf_get(lpfc_rcqe_status, &rcqe);
+       switch (status) {
+       case FC_STATUS_RQ_BUF_LEN_EXCEEDED:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2537 Receive Frame Truncated!!\n");
+               /* fall through: still consume the truncated frame */
+       case FC_STATUS_RQ_SUCCESS:
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               dma_buf = lpfc_sli_hbqbuf_get(&phba->hbqs[0].hbq_buffer_list);
+               if (!dma_buf) {
+                       spin_unlock_irqrestore(&phba->hbalock, iflags);
+                       goto out;
+               }
+               memcpy(&dma_buf->rcqe, &rcqe, sizeof(rcqe));
+               /* save off the frame for the worker thread to process */
+               list_add_tail(&dma_buf->dbuf.list, &phba->rb_pend_list);
+               /* Frame received */
+               phba->hba_flag |= HBA_RECEIVE_BUFFER;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               workposted = true;
+               break;
+       case FC_STATUS_INSUFF_BUF_NEED_BUF:
+       case FC_STATUS_INSUFF_BUF_FRM_DISC:
+               /* Post more buffers if possible */
+               spin_lock_irqsave(&phba->hbalock, iflags);
+               phba->hba_flag |= HBA_POST_RECEIVE_BUFFER;
+               spin_unlock_irqrestore(&phba->hbalock, iflags);
+               workposted = true;
+               break;
+       }
+out:
+       return workposted;
+
+}
+
+/**
+ * lpfc_sli4_sp_handle_eqe - Process a slow-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to slow-path event queue entry.
+ *
+ * This routine processes an event queue entry from the slow-path event
+ * queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event on a completion queue; if not, an error is logged
+ * and the routine returns. Otherwise, it finds the corresponding
+ * completion queue, processes all the entries on that completion queue,
+ * rearms the completion queue, and then returns.
+ *
+ **/
+static void
+lpfc_sli4_sp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe)
+{
+       struct lpfc_queue *cq = NULL, *childq, *speq;
+       struct lpfc_cqe *cqe;
+       bool workposted = false;
+       int ecount = 0;
+       uint16_t cqid;
+
+       if (bf_get(lpfc_eqe_major_code, eqe) != 0 ||
+           bf_get(lpfc_eqe_minor_code, eqe) != 0) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0359 Not a valid slow-path completion "
+                               "event: majorcode=x%x, minorcode=x%x\n",
+                               bf_get(lpfc_eqe_major_code, eqe),
+                               bf_get(lpfc_eqe_minor_code, eqe));
+               return;
+       }
+
+       /* Get the reference to the corresponding CQ */
+       cqid = bf_get(lpfc_eqe_resource_id, eqe);
+
+       /* Search for completion queue pointer matching this cqid */
+       speq = phba->sli4_hba.sp_eq;
+       list_for_each_entry(childq, &speq->child_list, list) {
+               if (childq->queue_id == cqid) {
+                       cq = childq;
+                       break;
+               }
+       }
+       if (unlikely(!cq)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0365 Slow-path CQ identifier (%d) does "
+                               "not exist\n", cqid);
+               return;
+       }
+
+       /* Process all the entries to the CQ */
+       switch (cq->type) {
+       case LPFC_MCQ:
+               while ((cqe = lpfc_sli4_cq_get(cq))) {
+                       workposted |= lpfc_sli4_sp_handle_mcqe(phba, cqe);
+                       if (!(++ecount % LPFC_GET_QE_REL_INT))
+                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+               }
+               break;
+       case LPFC_WCQ:
+               while ((cqe = lpfc_sli4_cq_get(cq))) {
+                       workposted |= lpfc_sli4_sp_handle_wcqe(phba, cq, cqe);
+                       if (!(++ecount % LPFC_GET_QE_REL_INT))
+                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+               }
+               break;
+       case LPFC_RCQ:
+               while ((cqe = lpfc_sli4_cq_get(cq))) {
+                       workposted |= lpfc_sli4_sp_handle_rcqe(phba, cqe);
+                       if (!(++ecount % LPFC_GET_QE_REL_INT))
+                               lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+               }
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0370 Invalid completion queue type (%d)\n",
+                               cq->type);
+               return;
+       }
+
+       /* Catch the no cq entry condition, log an error */
+       if (unlikely(ecount == 0))
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0371 No entry from the CQ: identifier "
+                               "(x%x), type (%d)\n", cq->queue_id, cq->type);
+
+       /* In any case, flush and re-arm the CQ */
+       lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+       /* wake up worker thread if there are works to be done */
+       if (workposted)
+               lpfc_worker_wake_up(phba);
+}
+
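+/* Hedged sketch of the drain-and-batch-release idiom above: CQEs are
+ * consumed in a loop, the CQ is released without rearming every
+ * LPFC_GET_QE_REL_INT entries to hand slots back to the hardware, and a
+ * final release rearms the queue. consume() is a hypothetical handler.
+ */
+static void example_cq_drain(struct lpfc_queue *cq)
+{
+       struct lpfc_cqe *cqe;
+       int ecount = 0;
+
+       while ((cqe = lpfc_sli4_cq_get(cq))) {
+               consume(cqe);                   /* hypothetical handler */
+               if (!(++ecount % LPFC_GET_QE_REL_INT))
+                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+       }
+       lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+}
+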
+/**
+ * lpfc_sli4_fp_handle_fcp_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for an FCP command response completion.
+ **/
+static void
+lpfc_sli4_fp_handle_fcp_wcqe(struct lpfc_hba *phba,
+                            struct lpfc_wcqe_complete *wcqe)
+{
+       struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_FCP_RING];
+       struct lpfc_iocbq *cmdiocbq;
+       struct lpfc_iocbq irspiocbq;
+       unsigned long iflags;
+
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       pring->stats.iocb_event++;
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+
+       /* Check for response status */
+       if (unlikely(bf_get(lpfc_wcqe_c_status, wcqe))) {
+               /* If resource errors reported from HBA, reduce queue
+                * depth of the SCSI device.
+                */
+               if ((bf_get(lpfc_wcqe_c_status, wcqe) ==
+                    IOSTAT_LOCAL_REJECT) &&
+                   (wcqe->parameter == IOERR_NO_RESOURCES)) {
+                       phba->lpfc_rampdown_queue_depth(phba);
+               }
+               /* Log the error status */
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "0373 FCP complete error: status=x%x, "
+                               "hw_status=x%x, total_data_specified=%d, "
+                               "parameter=x%x, word3=x%x\n",
+                               bf_get(lpfc_wcqe_c_status, wcqe),
+                               bf_get(lpfc_wcqe_c_hw_status, wcqe),
+                               wcqe->total_data_placed, wcqe->parameter,
+                               wcqe->word3);
+       }
+
+       /* Look up the FCP command IOCB and create pseudo response IOCB */
+       spin_lock_irqsave(&phba->hbalock, iflags);
+       cmdiocbq = lpfc_sli_iocbq_lookup_by_tag(phba, pring,
+                               bf_get(lpfc_wcqe_c_request_tag, wcqe));
+       spin_unlock_irqrestore(&phba->hbalock, iflags);
+       if (unlikely(!cmdiocbq)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "0374 FCP complete with no corresponding "
+                               "cmdiocb: iotag (%d)\n",
+                               bf_get(lpfc_wcqe_c_request_tag, wcqe));
+               return;
+       }
+       if (unlikely(!cmdiocbq->iocb_cmpl)) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "0375 FCP cmdiocb has no callback function "
+                               "iotag: (%d)\n",
+                               bf_get(lpfc_wcqe_c_request_tag, wcqe));
+               return;
+       }
+
+       /* Fake the irspiocb and copy necessary response information */
+       lpfc_sli4_iocb_param_transfer(&irspiocbq, cmdiocbq, wcqe);
+
+       /* Pass the cmd_iocb and the rsp state to the upper layer */
+       (cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
+}
+
+/**
+ * lpfc_sli4_fp_handle_rel_wcqe - Handle fast-path WQ entry consumed event
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to completion queue.
+ * @wcqe: Pointer to work-queue completion queue entry.
+ *
+ * This routine handles a fast-path WQ entry consumed event by invoking the
+ * proper WQ release routine for the fast-path WQ.
+ **/
+static void
+lpfc_sli4_fp_handle_rel_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+                            struct lpfc_wcqe_release *wcqe)
+{
+       struct lpfc_queue *childwq;
+       bool wqid_matched = false;
+       uint16_t fcp_wqid;
+
+       /* Check for fast-path FCP work queue release */
+       fcp_wqid = bf_get(lpfc_wcqe_r_wq_id, wcqe);
+       list_for_each_entry(childwq, &cq->child_list, list) {
+               if (childwq->queue_id == fcp_wqid) {
+                       lpfc_sli4_wq_release(childwq,
+                                       bf_get(lpfc_wcqe_r_wqe_index, wcqe));
+                       wqid_matched = true;
+                       break;
+               }
+       }
+       /* Report warning log message if no match found */
+       if (!wqid_matched)
+               lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                               "2580 Fast-path wqe consume event carries "
+                               "mismatched qid: wcqe-qid=x%x\n", fcp_wqid);
+}
+
+/**
+ * lpfc_sli4_fp_handle_wcqe - Process fast-path work queue completion entry
+ * @phba: Pointer to HBA context object.
+ * @cq: Pointer to the completion queue.
+ * @cqe: Pointer to fast-path completion queue entry.
+ *
+ * This routine processes a fast-path work queue completion entry from the
+ * fast-path event queue for an FCP command response completion.
+ **/
+static int
+lpfc_sli4_fp_handle_wcqe(struct lpfc_hba *phba, struct lpfc_queue *cq,
+                        struct lpfc_cqe *cqe)
+{
+       struct lpfc_wcqe_release wcqe;
+       bool workposted = false;
+
+       /* Copy the work queue CQE and convert endian order if needed */
+       lpfc_sli_pcimem_bcopy(cqe, &wcqe, sizeof(struct lpfc_cqe));
+
+       /* Check and process for different type of WCQE and dispatch */
+       switch (bf_get(lpfc_wcqe_c_code, &wcqe)) {
+       case CQE_CODE_COMPL_WQE:
+               /* Process the WQ complete event */
+               lpfc_sli4_fp_handle_fcp_wcqe(phba,
+                               (struct lpfc_wcqe_complete *)&wcqe);
+               break;
+       case CQE_CODE_RELEASE_WQE:
+               /* Process the WQ release event */
+               lpfc_sli4_fp_handle_rel_wcqe(phba, cq,
+                               (struct lpfc_wcqe_release *)&wcqe);
+               break;
+       case CQE_CODE_XRI_ABORTED:
+               /* Process the WQ XRI abort event */
+               workposted = lpfc_sli4_sp_handle_abort_xri_wcqe(phba, cq,
+                               (struct sli4_wcqe_xri_aborted *)&wcqe);
+               break;
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0144 Not a valid WCQE code: x%x\n",
+                               bf_get(lpfc_wcqe_c_code, &wcqe));
+               break;
+       }
+       return workposted;
+}
+
+/**
+ * lpfc_sli4_fp_handle_eqe - Process a fast-path event queue entry
+ * @phba: Pointer to HBA context object.
+ * @eqe: Pointer to fast-path event queue entry.
+ *
+ * This routine processes an event queue entry from the fast-path event
+ * queue. It checks the MajorCode and MinorCode to determine whether this
+ * is a completion event on a completion queue; if not, an error is logged
+ * and the routine returns. Otherwise, it finds the corresponding
+ * completion queue, processes all the entries on the completion queue,
+ * rearms the completion queue, and then returns.
+ **/
+static void
+lpfc_sli4_fp_handle_eqe(struct lpfc_hba *phba, struct lpfc_eqe *eqe,
+                       uint32_t fcp_cqidx)
+{
+       struct lpfc_queue *cq;
+       struct lpfc_cqe *cqe;
+       bool workposted = false;
+       uint16_t cqid;
+       int ecount = 0;
+
+       if (unlikely(bf_get(lpfc_eqe_major_code, eqe) != 0) ||
+           unlikely(bf_get(lpfc_eqe_minor_code, eqe) != 0)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0366 Not a valid fast-path completion "
+                               "event: majorcode=x%x, minorcode=x%x\n",
+                               bf_get(lpfc_eqe_major_code, eqe),
+                               bf_get(lpfc_eqe_minor_code, eqe));
+               return;
+       }
+
+       cq = phba->sli4_hba.fcp_cq[fcp_cqidx];
+       if (unlikely(!cq)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0367 Fast-path completion queue does not "
+                               "exist\n");
+               return;
+       }
+
+       /* Get the reference to the corresponding CQ */
+       cqid = bf_get(lpfc_eqe_resource_id, eqe);
+       if (unlikely(cqid != cq->queue_id)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0368 Mismatched fast-path completion "
+                               "queue identifier: eqcqid=%d, fcpcqid=%d\n",
+                               cqid, cq->queue_id);
+               return;
+       }
+
+       /* Process all the entries to the CQ */
+       while ((cqe = lpfc_sli4_cq_get(cq))) {
+               workposted |= lpfc_sli4_fp_handle_wcqe(phba, cq, cqe);
+               if (!(++ecount % LPFC_GET_QE_REL_INT))
+                       lpfc_sli4_cq_release(cq, LPFC_QUEUE_NOARM);
+       }
+
+       /* Catch the no cq entry condition */
+       if (unlikely(ecount == 0))
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0369 No entry from fast-path completion "
+                               "queue fcpcqid=%d\n", cq->queue_id);
+
+       /* In any case, flush and re-arm the CQ */
+       lpfc_sli4_cq_release(cq, LPFC_QUEUE_REARM);
+
+       /* wake up worker thread if there are works to be done */
+       if (workposted)
+               lpfc_worker_wake_up(phba);
+}
+
+static void
+lpfc_sli4_eq_flush(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+       struct lpfc_eqe *eqe;
+
+       /* walk all the EQ entries and drop on the floor */
+       while ((eqe = lpfc_sli4_eq_get(eq)))
+               ;
+
+       /* Clear and re-arm the EQ */
+       lpfc_sli4_eq_release(eq, LPFC_QUEUE_REARM);
+}
+
+/**
+ * lpfc_sli4_sp_intr_handler - Slow-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there are slow-path events in
+ * the HBA. However, when the device is enabled with either MSI or Pin-IRQ
+ * interrupt mode, this function is called as part of the device-level
+ * interrupt handler. When the PCI slot is in error recovery or the HBA is
+ * undergoing initialization, the interrupt handler will not process the
+ * interrupt. The link attention and ELS ring attention events are handled
+ * by the worker thread. The interrupt handler signals the worker thread
+ * and returns for these events. This function is called without any lock
+ * held. It gets the hbalock to access and update SLI data structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_sp_intr_handler(int irq, void *dev_id)
+{
+       struct lpfc_hba *phba;
+       struct lpfc_queue *speq;
+       struct lpfc_eqe *eqe;
+       unsigned long iflag;
+       int ecount = 0;
+
+       /*
+        * Get the driver's phba structure from the dev_id
+        */
+       phba = (struct lpfc_hba *)dev_id;
+
+       if (unlikely(!phba))
+               return IRQ_NONE;
+
+       /* Get to the EQ struct associated with this vector */
+       speq = phba->sli4_hba.sp_eq;
+
+       /* Check device state for handling interrupt */
+       if (unlikely(lpfc_intr_state_check(phba))) {
+               /* Check again for link_state with lock held */
+               spin_lock_irqsave(&phba->hbalock, iflag);
+               if (phba->link_state < LPFC_LINK_DOWN)
+                       /* Flush, clear interrupt, and rearm the EQ */
+                       lpfc_sli4_eq_flush(phba, speq);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               return IRQ_NONE;
+       }
+
+       /*
+        * Process all the events on the slow-path EQ
+        */
+       while ((eqe = lpfc_sli4_eq_get(speq))) {
+               lpfc_sli4_sp_handle_eqe(phba, eqe);
+               if (!(++ecount % LPFC_GET_QE_REL_INT))
+                       lpfc_sli4_eq_release(speq, LPFC_QUEUE_NOARM);
+       }
+
+       /* Always clear and re-arm the slow-path EQ */
+       lpfc_sli4_eq_release(speq, LPFC_QUEUE_REARM);
+
+       /* Catch the no EQ entry condition */
+       if (unlikely(ecount == 0)) {
+               if (phba->intr_type == MSIX)
+                       /* MSI-X vector is dedicated, so no EQE is unexpected */
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "0357 MSI-X interrupt with no EQE\n");
+               else
+                       /* non-MSI-X: the interrupt line may be shared */
+                       return IRQ_NONE;
+       }
+
+       return IRQ_HANDLED;
+} /* lpfc_sli4_sp_intr_handler */
+
+/**
+ * lpfc_sli4_fp_intr_handler - Fast-path interrupt handler to SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is directly called from the PCI layer as an interrupt
+ * service routine when device with SLI-4 interface spec is enabled with
+ * MSI-X multi-message interrupt mode and there is a fast-path FCP IOCB
+ * ring event in the HBA. However, when the device is enabled with either
+ * MSI or Pin-IRQ interrupt mode, this function is called as part of the
+ * device-level interrupt handler. When the PCI slot is in error recovery
+ * or the HBA is undergoing initialization, the interrupt handler will not
+ * process the interrupt. The SCSI FCP fast-path ring events are handled in
+ * the interrupt context. This function is called without any lock held.
+ * It gets the hbalock to access and update SLI data structures. Note that
+ * the FCP EQs map one-to-one to the FCP CQs, so an FCP EQ index is equal
+ * to its FCP CQ index.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_fp_intr_handler(int irq, void *dev_id)
+{
+       struct lpfc_hba *phba;
+       struct lpfc_fcp_eq_hdl *fcp_eq_hdl;
+       struct lpfc_queue *fpeq;
+       struct lpfc_eqe *eqe;
+       unsigned long iflag;
+       int ecount = 0;
+       uint32_t fcp_eqidx;
+
+       /* Get the driver's phba structure from the dev_id */
+       fcp_eq_hdl = (struct lpfc_fcp_eq_hdl *)dev_id;
+       phba = fcp_eq_hdl->phba;
+       fcp_eqidx = fcp_eq_hdl->idx;
+
+       if (unlikely(!phba))
+               return IRQ_NONE;
+
+       /* Get to the EQ struct associated with this vector */
+       fpeq = phba->sli4_hba.fp_eq[fcp_eqidx];
+
+       /* Check device state for handling interrupt */
+       if (unlikely(lpfc_intr_state_check(phba))) {
+               /* Check again for link_state with lock held */
+               spin_lock_irqsave(&phba->hbalock, iflag);
+               if (phba->link_state < LPFC_LINK_DOWN)
+                       /* Flush, clear interrupt, and rearm the EQ */
+                       lpfc_sli4_eq_flush(phba, fpeq);
+               spin_unlock_irqrestore(&phba->hbalock, iflag);
+               return IRQ_NONE;
+       }
+
+       /*
+        * Process all the events on the FCP fast-path EQ
+        */
+       while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+               lpfc_sli4_fp_handle_eqe(phba, eqe, fcp_eqidx);
+               if (!(++ecount % LPFC_GET_QE_REL_INT))
+                       lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
+       }
+
+       /* Always clear and re-arm the fast-path EQ */
+       lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_REARM);
+
+       if (unlikely(ecount == 0)) {
+               if (phba->intr_type == MSIX)
+                       /* MSI-X: an interrupt with no EQ entry is unexpected */
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "0358 MSI-X interrupt with no EQE\n");
+               else
+                       /* Non MSI-X: treat as a shared interrupt, not ours */
+                       return IRQ_NONE;
+       }
+
+       return IRQ_HANDLED;
+} /* lpfc_sli4_fp_intr_handler */
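+
+/*
+ * A registration sketch for the fast-path handler, assuming one MSI-X
+ * vector per fast-path EQ; "vector[i]" and the "lpfc:fp" name string are
+ * placeholders:
+ *
+ *     for (i = 0; i < phba->cfg_fcp_eq_count; i++)
+ *             rc = request_irq(vector[i], lpfc_sli4_fp_intr_handler, 0,
+ *                              "lpfc:fp", &phba->sli4_hba.fcp_eq_hdl[i]);
+ *
+ * Each fcp_eq_hdl carries the phba pointer and its EQ index, which is how
+ * this handler recovers fcp_eqidx from dev_id.
+ */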
+
+/**
+ * lpfc_sli4_intr_handler - Device-level interrupt handler for SLI-4 device
+ * @irq: Interrupt number.
+ * @dev_id: The device context pointer.
+ *
+ * This function is the device-level interrupt handler to device with SLI-4
+ * interface spec, called from the PCI layer when either MSI or Pin-IRQ
+ * interrupt mode is enabled and there is an event in the HBA which requires
+ * driver attention. This function invokes the slow-path interrupt attention
+ * handling function and fast-path interrupt attention handling function in
+ * turn to process the relevant HBA attention events. This function is called
+ * without any lock held. It gets the hbalock to access and update SLI data
+ * structures.
+ *
+ * This function returns IRQ_HANDLED when interrupt is handled, else it
+ * returns IRQ_NONE.
+ **/
+irqreturn_t
+lpfc_sli4_intr_handler(int irq, void *dev_id)
+{
+       struct lpfc_hba  *phba;
+       irqreturn_t sp_irq_rc, fp_irq_rc;
+       bool fp_handled = false;
+       uint32_t fcp_eqidx;
+
+       /* Get the driver's phba structure from the dev_id */
+       phba = (struct lpfc_hba *)dev_id;
+
+       if (unlikely(!phba))
+               return IRQ_NONE;
+
+       /*
+        * Invoke slow-path host attention interrupt handling as appropriate.
+        */
+       sp_irq_rc = lpfc_sli4_sp_intr_handler(irq, dev_id);
+
+       /*
+        * Invoke fast-path host attention interrupt handling as appropriate.
+        */
+       for (fcp_eqidx = 0; fcp_eqidx < phba->cfg_fcp_eq_count; fcp_eqidx++) {
+               fp_irq_rc = lpfc_sli4_fp_intr_handler(irq,
+                                       &phba->sli4_hba.fcp_eq_hdl[fcp_eqidx]);
+               if (fp_irq_rc == IRQ_HANDLED)
+                       fp_handled |= true;
+       }
+
+       return (fp_handled == true) ? IRQ_HANDLED : sp_irq_rc;
+} /* lpfc_sli4_intr_handler */
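+
+/*
+ * In MSI or INTx mode only this device-level handler is registered, on a
+ * single (possibly shared) line; a sketch, with "pdev" standing in for the
+ * PCI device:
+ *
+ *     rc = request_irq(pdev->irq, lpfc_sli4_intr_handler, IRQF_SHARED,
+ *                      "lpfc", phba);
+ *
+ * The handler then fans out to the slow-path handler and to every
+ * fast-path EQ handler, as above.
+ */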
+
+/**
+ * lpfc_sli4_queue_free - free a queue structure and associated memory
+ * @queue: The queue structure to free.
+ *
+ * This function frees a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called after destroying the
+ * queue on the HBA.
+ **/
+void
+lpfc_sli4_queue_free(struct lpfc_queue *queue)
+{
+       struct lpfc_dmabuf *dmabuf;
+
+       if (!queue)
+               return;
+
+       while (!list_empty(&queue->page_list)) {
+               list_remove_head(&queue->page_list, dmabuf, struct lpfc_dmabuf,
+                                list);
+               dma_free_coherent(&queue->phba->pcidev->dev, PAGE_SIZE,
+                                 dmabuf->virt, dmabuf->phys);
+               kfree(dmabuf);
+       }
+       kfree(queue);
+       return;
+}
+
+/**
+ * lpfc_sli4_queue_alloc - Allocate and initialize a queue structure
+ * @phba: The HBA that this queue is being created on.
+ * @entry_size: The size of each queue entry for this queue.
+ * @entry_count: The number of entries that this queue will handle.
+ *
+ * This function allocates a queue structure and the DMAable memory used for
+ * the host resident queue. This function must be called before creating the
+ * queue on the HBA.
+ **/
+struct lpfc_queue *
+lpfc_sli4_queue_alloc(struct lpfc_hba *phba, uint32_t entry_size,
+                     uint32_t entry_count)
+{
+       struct lpfc_queue *queue;
+       struct lpfc_dmabuf *dmabuf;
+       int x, total_qe_count;
+       void *dma_pointer;
+
+
+       queue = kzalloc(sizeof(struct lpfc_queue) +
+                       (sizeof(union sli4_qe) * entry_count), GFP_KERNEL);
+       if (!queue)
+               return NULL;
+       queue->page_count = (PAGE_ALIGN(entry_size * entry_count))/PAGE_SIZE;
+       INIT_LIST_HEAD(&queue->list);
+       INIT_LIST_HEAD(&queue->page_list);
+       INIT_LIST_HEAD(&queue->child_list);
+       for (x = 0, total_qe_count = 0; x < queue->page_count; x++) {
+               dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+               if (!dmabuf)
+                       goto out_fail;
+               dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
+                                                 PAGE_SIZE, &dmabuf->phys,
+                                                 GFP_KERNEL);
+               if (!dmabuf->virt) {
+                       kfree(dmabuf);
+                       goto out_fail;
+               }
+               dmabuf->buffer_tag = x;
+               list_add_tail(&dmabuf->list, &queue->page_list);
+               /* initialize queue's entry array */
+               dma_pointer = dmabuf->virt;
+               for (; total_qe_count < entry_count &&
+                    dma_pointer < (PAGE_SIZE + dmabuf->virt);
+                    total_qe_count++, dma_pointer += entry_size) {
+                       queue->qe[total_qe_count].address = dma_pointer;
+               }
+       }
+       queue->entry_size = entry_size;
+       queue->entry_count = entry_count;
+       queue->phba = phba;
+
+       return queue;
+out_fail:
+       lpfc_sli4_queue_free(queue);
+       return NULL;
+}
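+
+/*
+ * A sketch of the lifecycle these two functions bracket, using an event
+ * queue as the example (the entry count is illustrative):
+ *
+ *     struct lpfc_queue *eq;
+ *
+ *     eq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_eqe), 1024);
+ *     if (!eq)
+ *             return -ENOMEM;
+ *     (create the queue on the HBA, use it, then destroy it on the HBA)
+ *     lpfc_sli4_queue_free(eq);
+ */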
+
+/**
+ * lpfc_eq_create - Create an Event Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @eq: The queue structure to use to create the event queue.
+ * @imax: The maximum interrupt per second limit.
+ *
+ * This function creates an event queue, as detailed in @eq, on a port,
+ * described by @phba by sending an EQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @eq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the EQ_CREATE mailbox command to the HBA to setup the
+ * event queue. This function is synchronous and will wait for the mailbox
+ * command to complete before returning.
+ *
+ * On success this function returns zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint16_t imax)
+{
+       struct lpfc_mbx_eq_create *eq_create;
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       struct lpfc_dmabuf *dmabuf;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+       uint16_t dmult;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_eq_create) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_EQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       eq_create = &mbox->u.mqe.un.eq_create;
+       bf_set(lpfc_mbx_eq_create_num_pages, &eq_create->u.request,
+              eq->page_count);
+       bf_set(lpfc_eq_context_size, &eq_create->u.request.context,
+              LPFC_EQE_SIZE);
+       bf_set(lpfc_eq_context_valid, &eq_create->u.request.context, 1);
+       /* Calculate the delay multiplier from the maximum interrupts per second */
+       dmult = LPFC_DMULT_CONST/imax - 1;
+       bf_set(lpfc_eq_context_delay_multi, &eq_create->u.request.context,
+              dmult);
+       switch (eq->entry_count) {
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0360 Unsupported EQ count. (%d)\n",
+                               eq->entry_count);
+               if (eq->entry_count < 256) {
+                       /* free the mailbox before bailing out */
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       return -EINVAL;
+               }
+               /* otherwise default to smallest count (drop through) */
+       case 256:
+               bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+                      LPFC_EQ_CNT_256);
+               break;
+       case 512:
+               bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+                      LPFC_EQ_CNT_512);
+               break;
+       case 1024:
+               bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+                      LPFC_EQ_CNT_1024);
+               break;
+       case 2048:
+               bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+                      LPFC_EQ_CNT_2048);
+               break;
+       case 4096:
+               bf_set(lpfc_eq_context_count, &eq_create->u.request.context,
+                      LPFC_EQ_CNT_4096);
+               break;
+       }
+       list_for_each_entry(dmabuf, &eq->page_list, list) {
+               eq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                                       putPaddrLow(dmabuf->phys);
+               eq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                                       putPaddrHigh(dmabuf->phys);
+       }
+       mbox->vport = phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       mbox->context1 = NULL;
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *) &eq_create->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2500 EQ_CREATE mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+       }
+       eq->type = LPFC_EQ;
+       eq->subtype = LPFC_NONE;
+       eq->queue_id = bf_get(lpfc_mbx_eq_create_q_id, &eq_create->u.response);
+       if (eq->queue_id == 0xFFFF)
+               status = -ENXIO;
+       eq->host_index = 0;
+       eq->hba_index = 0;
+
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       return status;
+}
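+
+/*
+ * Worked example for the delay multiplier above: dmult is computed as
+ * LPFC_DMULT_CONST / imax - 1, so a larger requested interrupt rate yields
+ * a smaller multiplier and therefore less interrupt coalescing delay. A
+ * typical call, with "fcp_imax" standing in for a driver-tunable rate:
+ *
+ *     rc = lpfc_eq_create(phba, eq, fcp_imax);
+ *     if (rc)
+ *             (the EQ was not created; free the queue or retry)
+ */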
+
+/**
+ * lpfc_cq_create - Create a Completion Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @cq: The queue structure to use to create the completion queue.
+ * @eq: The event queue to bind this completion queue to.
+ * @type: The type of the completion queue.
+ * @subtype: The functional subtype of the completion queue.
+ *
+ * This function creates a completion queue, as detailed in @cq, on a port,
+ * described by @phba by sending a CQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @cq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @eq
+ * is used to indicate which event queue to bind this completion queue to. This
+ * function will send the CQ_CREATE mailbox command to the HBA to setup the
+ * completion queue. This function is synchronous and will wait for the
+ * mailbox command to complete before returning.
+ *
+ * On success this function returns zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
+              struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
+{
+       struct lpfc_mbx_cq_create *cq_create;
+       struct lpfc_dmabuf *dmabuf;
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_cq_create) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_CQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       cq_create = &mbox->u.mqe.un.cq_create;
+       bf_set(lpfc_mbx_cq_create_num_pages, &cq_create->u.request,
+                   cq->page_count);
+       bf_set(lpfc_cq_context_event, &cq_create->u.request.context, 1);
+       bf_set(lpfc_cq_context_valid, &cq_create->u.request.context, 1);
+       bf_set(lpfc_cq_eq_id, &cq_create->u.request.context, eq->queue_id);
+       switch (cq->entry_count) {
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0361 Unsupported CQ count. (%d)\n",
+                               cq->entry_count);
+               if (cq->entry_count < 256) {
+                       /* free the mailbox before bailing out */
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       return -EINVAL;
+               }
+               /* otherwise default to smallest count (drop through) */
+       case 256:
+               bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+                      LPFC_CQ_CNT_256);
+               break;
+       case 512:
+               bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+                      LPFC_CQ_CNT_512);
+               break;
+       case 1024:
+               bf_set(lpfc_cq_context_count, &cq_create->u.request.context,
+                      LPFC_CQ_CNT_1024);
+               break;
+       }
+       list_for_each_entry(dmabuf, &cq->page_list, list) {
+               cq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                                       putPaddrLow(dmabuf->phys);
+               cq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                                       putPaddrHigh(dmabuf->phys);
+       }
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &cq_create->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2501 CQ_CREATE mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+               goto out;
+       }
+       cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+       if (cq->queue_id == 0xFFFF) {
+               status = -ENXIO;
+               goto out;
+       }
+       /* link the cq onto the parent eq child list */
+       list_add_tail(&cq->list, &eq->child_list);
+       /* Set up completion queue's type and subtype */
+       cq->type = type;
+       cq->subtype = subtype;
+       cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
+       cq->host_index = 0;
+       cq->hba_index = 0;
+out:
+
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       return status;
+}
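+
+/*
+ * A sketch of binding a new CQ to an existing EQ; LPFC_WCQ and LPFC_FCP
+ * are illustrative type/subtype values for a work completion queue
+ * servicing FCP traffic:
+ *
+ *     cq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_cqe), 1024);
+ *     if (cq && lpfc_cq_create(phba, cq, eq, LPFC_WCQ, LPFC_FCP) == 0)
+ *             (cq is now linked on eq's child_list and ready for use)
+ */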
+
+/**
+ * lpfc_mq_create - Create a mailbox Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @mq: The queue structure to use to create the mailbox queue.
+ * @cq: The completion queue to associate with this mailbox queue.
+ * @subtype: The functional subtype of the mailbox queue.
+ *
+ * This function creates a mailbox queue, as detailed in @mq, on a port,
+ * described by @phba by sending a MQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @mq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. This
+ * function will send the MQ_CREATE mailbox command to the HBA to setup the
+ * mailbox queue. This function is synchronous and will wait for the
+ * mailbox command to complete before returning.
+ *
+ * On success this function returns zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_create(struct lpfc_hba *phba, struct lpfc_queue *mq,
+              struct lpfc_queue *cq, uint32_t subtype)
+{
+       struct lpfc_mbx_mq_create *mq_create;
+       struct lpfc_dmabuf *dmabuf;
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_mq_create) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_MQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       mq_create = &mbox->u.mqe.un.mq_create;
+       bf_set(lpfc_mbx_mq_create_num_pages, &mq_create->u.request,
+                   mq->page_count);
+       bf_set(lpfc_mq_context_cq_id, &mq_create->u.request.context,
+                   cq->queue_id);
+       bf_set(lpfc_mq_context_valid, &mq_create->u.request.context, 1);
+       switch (mq->entry_count) {
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0362 Unsupported MQ count. (%d)\n",
+                               mq->entry_count);
+               if (mq->entry_count < 16) {
+                       /* free the mailbox before bailing out */
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       return -EINVAL;
+               }
+               /* otherwise default to smallest count (drop through) */
+       case 16:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_16);
+               break;
+       case 32:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_32);
+               break;
+       case 64:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_64);
+               break;
+       case 128:
+               bf_set(lpfc_mq_context_count, &mq_create->u.request.context,
+                      LPFC_MQ_CNT_128);
+               break;
+       }
+       list_for_each_entry(dmabuf, &mq->page_list, list) {
+               mq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                                       putPaddrLow(dmabuf->phys);
+               mq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                                       putPaddrHigh(dmabuf->phys);
+       }
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &mq_create->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2502 MQ_CREATE mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+               goto out;
+       }
+       mq->queue_id = bf_get(lpfc_mbx_mq_create_q_id, &mq_create->u.response);
+       if (mq->queue_id == 0xFFFF) {
+               status = -ENXIO;
+               goto out;
+       }
+       mq->type = LPFC_MQ;
+       mq->subtype = subtype;
+       mq->host_index = 0;
+       mq->hba_index = 0;
+
+       /* link the mq onto the parent cq child list */
+       list_add_tail(&mq->list, &cq->child_list);
+out:
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       return status;
+}
+
+/**
+ * lpfc_wq_create - Create a Work Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @wq: The queue structure to use to create the work queue.
+ * @cq: The completion queue to bind this work queue to.
+ * @subtype: The subtype of the work queue indicating its functionality.
+ *
+ * This function creates a work queue, as detailed in @wq, on a port, described
+ * by @phba by sending a WQ_CREATE mailbox command to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @wq struct
+ * is used to get the entry count and entry size that are necessary to
+ * determine the number of pages to allocate and use for this queue. The @cq
+ * is used to indicate which completion queue to bind this work queue to. This
+ * function will send the WQ_CREATE mailbox command to the HBA to setup the
+ * work queue. This function is synchronous and will wait for the mailbox
+ * command to complete before returning.
+ *
+ * On success this function returns zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
+              struct lpfc_queue *cq, uint32_t subtype)
+{
+       struct lpfc_mbx_wq_create *wq_create;
+       struct lpfc_dmabuf *dmabuf;
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_wq_create) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_WQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       wq_create = &mbox->u.mqe.un.wq_create;
+       bf_set(lpfc_mbx_wq_create_num_pages, &wq_create->u.request,
+                   wq->page_count);
+       bf_set(lpfc_mbx_wq_create_cq_id, &wq_create->u.request,
+                   cq->queue_id);
+       list_for_each_entry(dmabuf, &wq->page_list, list) {
+               wq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                                       putPaddrLow(dmabuf->phys);
+               wq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                                       putPaddrHigh(dmabuf->phys);
+       }
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &wq_create->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2503 WQ_CREATE mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+               goto out;
+       }
+       wq->queue_id = bf_get(lpfc_mbx_wq_create_q_id, &wq_create->u.response);
+       if (wq->queue_id == 0xFFFF) {
+               status = -ENXIO;
+               goto out;
+       }
+       wq->type = LPFC_WQ;
+       wq->subtype = subtype;
+       wq->host_index = 0;
+       wq->hba_index = 0;
+
+       /* link the wq onto the parent cq child list */
+       list_add_tail(&wq->list, &cq->child_list);
+out:
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       return status;
+}
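+
+/*
+ * A sketch of creating a work queue bound to a completion queue; the
+ * LPFC_FCP subtype is an illustrative value:
+ *
+ *     wq = lpfc_sli4_queue_alloc(phba, sizeof(union lpfc_wqe), 1024);
+ *     if (wq && lpfc_wq_create(phba, wq, cq, LPFC_FCP) == 0)
+ *             (WQEs posted to wq will now complete on cq)
+ */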
+
+/**
+ * lpfc_rq_create - Create a Receive Queue on the HBA
+ * @phba: HBA structure that indicates port to create a queue on.
+ * @hrq: The queue structure to use to create the header receive queue.
+ * @drq: The queue structure to use to create the data receive queue.
+ * @cq: The completion queue to bind these receive queues to.
+ * @subtype: The functional subtype of the receive queues.
+ *
+ * This function creates a receive buffer queue pair, as detailed in @hrq and
+ * @drq, on a port, described by @phba by sending a RQ_CREATE mailbox command
+ * to the HBA.
+ *
+ * The @phba struct is used to send mailbox command to HBA. The @drq and @hrq
+ * struct is used to get the entry count that is necessary to determine the
+ * number of pages to use for this queue. The @cq is used to indicate which
+ * completion queue the buffers received on these queues will be bound to.
+ * This function will send the RQ_CREATE mailbox command to the HBA to setup
+ * the receive queue pair. This function is synchronous and will wait for the
+ * mailbox command to complete before returning.
+ *
+ * On success this function returns zero. If unable to allocate enough
+ * memory this function will return -ENOMEM. If the queue create mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+              struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
+{
+       struct lpfc_mbx_rq_create *rq_create;
+       struct lpfc_dmabuf *dmabuf;
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (hrq->entry_count != drq->entry_count)
+               return -EINVAL;
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_rq_create) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       rq_create = &mbox->u.mqe.un.rq_create;
+       switch (hrq->entry_count) {
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2535 Unsupported RQ count. (%d)\n",
+                               hrq->entry_count);
+               if (hrq->entry_count < 512) {
+                       /* free the mailbox before bailing out */
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       return -EINVAL;
+               }
+               /* otherwise default to smallest count (drop through) */
+       case 512:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_512);
+               break;
+       case 1024:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_1024);
+               break;
+       case 2048:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_2048);
+               break;
+       case 4096:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_4096);
+               break;
+       }
+       bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+              cq->queue_id);
+       bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+              hrq->page_count);
+       bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+              LPFC_HDR_BUF_SIZE);
+       list_for_each_entry(dmabuf, &hrq->page_list, list) {
+               rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                                       putPaddrLow(dmabuf->phys);
+               rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                                       putPaddrHigh(dmabuf->phys);
+       }
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2504 RQ_CREATE mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+               goto out;
+       }
+       hrq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+       if (hrq->queue_id == 0xFFFF) {
+               status = -ENXIO;
+               goto out;
+       }
+       hrq->type = LPFC_HRQ;
+       hrq->subtype = subtype;
+       hrq->host_index = 0;
+       hrq->hba_index = 0;
+
+       /* now create the data queue */
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_RQ_CREATE,
+                        length, LPFC_SLI4_MBX_EMBED);
+       switch (drq->entry_count) {
+       default:
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2536 Unsupported RQ count. (%d)\n",
+                               drq->entry_count);
+               if (drq->entry_count < 512) {
+                       /* free the mailbox before bailing out */
+                       mempool_free(mbox, phba->mbox_mem_pool);
+                       return -EINVAL;
+               }
+               /* otherwise default to smallest count (drop through) */
+       case 512:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_512);
+               break;
+       case 1024:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_1024);
+               break;
+       case 2048:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_2048);
+               break;
+       case 4096:
+               bf_set(lpfc_rq_context_rq_size, &rq_create->u.request.context,
+                      LPFC_RQ_RING_SIZE_4096);
+               break;
+       }
+       bf_set(lpfc_rq_context_cq_id, &rq_create->u.request.context,
+              cq->queue_id);
+       bf_set(lpfc_mbx_rq_create_num_pages, &rq_create->u.request,
+              drq->page_count);
+       bf_set(lpfc_rq_context_buf_size, &rq_create->u.request.context,
+              LPFC_DATA_BUF_SIZE);
+       list_for_each_entry(dmabuf, &drq->page_list, list) {
+               rq_create->u.request.page[dmabuf->buffer_tag].addr_lo =
+                                       putPaddrLow(dmabuf->phys);
+               rq_create->u.request.page[dmabuf->buffer_tag].addr_hi =
+                                       putPaddrHigh(dmabuf->phys);
+       }
+       rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &rq_create->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               status = -ENXIO;
+               goto out;
+       }
+       drq->queue_id = bf_get(lpfc_mbx_rq_create_q_id, &rq_create->u.response);
+       if (drq->queue_id == 0xFFFF) {
+               status = -ENXIO;
+               goto out;
+       }
+       drq->type = LPFC_DRQ;
+       drq->subtype = subtype;
+       drq->host_index = 0;
+       drq->hba_index = 0;
+
+       /* link the header and data RQs onto the parent cq child list */
+       list_add_tail(&hrq->list, &cq->child_list);
+       list_add_tail(&drq->list, &cq->child_list);
+
+out:
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       return status;
+}
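+
+/*
+ * A sketch of creating the header/data receive queue pair; the two queues
+ * must have the same entry count, with frame headers landing in hrq
+ * buffers and payloads in drq buffers. LPFC_USOL is an illustrative
+ * subtype for unsolicited receive traffic:
+ *
+ *     hrq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_rqe), 512);
+ *     drq = lpfc_sli4_queue_alloc(phba, sizeof(struct lpfc_rqe), 512);
+ *     if (hrq && drq)
+ *             rc = lpfc_rq_create(phba, hrq, drq, cq, LPFC_USOL);
+ */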
+
+/**
+ * lpfc_eq_destroy - Destroy an Event Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue exists on.
+ * @eq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @eq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @eq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function returns zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (!eq)
+               return -ENODEV;
+       mbox = mempool_alloc(eq->phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_eq_destroy) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_EQ_DESTROY,
+                        length, LPFC_SLI4_MBX_EMBED);
+       bf_set(lpfc_mbx_eq_destroy_q_id, &mbox->u.mqe.un.eq_destroy.u.request,
+              eq->queue_id);
+       mbox->vport = eq->phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+
+       rc = lpfc_sli_issue_mbox(eq->phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.eq_destroy.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2505 EQ_DESTROY mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+       }
+
+       /* Remove eq from any list */
+       list_del_init(&eq->list);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, eq->phba->mbox_mem_pool);
+       return status;
+}
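+
+/*
+ * Teardown mirrors creation: destroy the queue on the HBA first, then free
+ * the host-side structure and its DMA pages:
+ *
+ *     lpfc_eq_destroy(phba, eq);
+ *     lpfc_sli4_queue_free(eq);
+ *
+ * Queues linked on eq's child_list (its CQs) are normally destroyed before
+ * their parent EQ.
+ */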
+
+/**
+ * lpfc_cq_destroy - Destroy a Completion Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue exists on.
+ * @cq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @cq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @cq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function returns zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (!cq)
+               return -ENODEV;
+       mbox = mempool_alloc(cq->phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_cq_destroy) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_CQ_DESTROY,
+                        length, LPFC_SLI4_MBX_EMBED);
+       bf_set(lpfc_mbx_cq_destroy_q_id, &mbox->u.mqe.un.cq_destroy.u.request,
+              cq->queue_id);
+       mbox->vport = cq->phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       rc = lpfc_sli_issue_mbox(cq->phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.wq_create.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2506 CQ_DESTROY mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+       }
+       /* Remove cq from any list */
+       list_del_init(&cq->list);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, cq->phba->mbox_mem_pool);
+       return status;
+}
+
+/**
+ * lpfc_mq_destroy - Destroy a Mailbox Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue exists on.
+ * @mq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @mq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @mq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function returns zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (!mq)
+               return -ENODEV;
+       mbox = mempool_alloc(mq->phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_mq_destroy) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_COMMON,
+                        LPFC_MBOX_OPCODE_MQ_DESTROY,
+                        length, LPFC_SLI4_MBX_EMBED);
+       bf_set(lpfc_mbx_mq_destroy_q_id, &mbox->u.mqe.un.mq_destroy.u.request,
+              mq->queue_id);
+       mbox->vport = mq->phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       rc = lpfc_sli_issue_mbox(mq->phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.mq_destroy.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2507 MQ_DESTROY mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+       }
+       /* Remove mq from any list */
+       list_del_init(&mq->list);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, mq->phba->mbox_mem_pool);
+       return status;
+}
+
+/**
+ * lpfc_wq_destroy - Destroy a Work Queue on the HBA
+ * @phba: HBA structure that indicates the port the queue exists on.
+ * @wq: The queue structure associated with the queue to destroy.
+ *
+ * This function destroys a queue, as detailed in @wq, by sending a mailbox
+ * command, specific to the type of queue, to the HBA.
+ *
+ * The @wq struct is used to get the queue ID of the queue to destroy.
+ *
+ * On success this function returns zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (!wq)
+               return -ENODEV;
+       mbox = mempool_alloc(wq->phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_wq_destroy) -
+                 sizeof(struct lpfc_sli4_cfg_mhdr));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_WQ_DESTROY,
+                        length, LPFC_SLI4_MBX_EMBED);
+       bf_set(lpfc_mbx_wq_destroy_q_id, &mbox->u.mqe.un.wq_destroy.u.request,
+              wq->queue_id);
+       mbox->vport = wq->phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       rc = lpfc_sli_issue_mbox(wq->phba, mbox, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.wq_destroy.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2508 WQ_DESTROY mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+       }
+       /* Remove wq from any list */
+       list_del_init(&wq->list);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, wq->phba->mbox_mem_pool);
+       return status;
+}
+
+/**
+ * lpfc_rq_destroy - Destroy a Receive Queue on the HBA
+ * @phba: HBA structure that indicates the port the queues exist on.
+ * @hrq: The queue structure associated with the header receive queue.
+ * @drq: The queue structure associated with the data receive queue.
+ *
+ * This function destroys the receive queue pair, as detailed in @hrq and
+ * @drq, by sending a mailbox command, specific to the type of queue, to
+ * the HBA.
+ *
+ * The @hrq and @drq structs are used to get the queue IDs of the queues
+ * to destroy.
+ *
+ * On success this function returns zero. If the queue destroy mailbox
+ * command fails this function will return -ENXIO.
+ **/
+uint32_t
+lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
+               struct lpfc_queue *drq)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc, length, status = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (!hrq || !drq)
+               return -ENODEV;
+       mbox = mempool_alloc(hrq->phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+       length = (sizeof(struct lpfc_mbx_rq_destroy) -
+                 sizeof(struct mbox_header));
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_RQ_DESTROY,
+                        length, LPFC_SLI4_MBX_EMBED);
+       bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+              hrq->queue_id);
+       mbox->vport = hrq->phba->pport;
+       mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+       rc = lpfc_sli_issue_mbox(hrq->phba, mbox, MBX_POLL);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2509 RQ_DESTROY mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               if (rc != MBX_TIMEOUT)
+                       mempool_free(mbox, hrq->phba->mbox_mem_pool);
+               return -ENXIO;
+       }
+       bf_set(lpfc_mbx_rq_destroy_q_id, &mbox->u.mqe.un.rq_destroy.u.request,
+              drq->queue_id);
+       rc = lpfc_sli_issue_mbox(drq->phba, mbox, MBX_POLL);
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.rq_destroy.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2510 RQ_DESTROY mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               status = -ENXIO;
+       }
+       list_del_init(&hrq->list);
+       list_del_init(&drq->list);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, hrq->phba->mbox_mem_pool);
+       return status;
+}
+
+/**
+ * lpfc_sli4_post_sgl - Post scatter gather list for an XRI to HBA
+ * @phba: Pointer to the HBA context object for which this call is executed.
+ * @pdma_phys_addr0: Physical address of the 1st SGL page.
+ * @pdma_phys_addr1: Physical address of the 2nd SGL page.
+ * @xritag: the xritag that ties this io to the SGL pages.
+ *
+ * This routine will post the sgl pages for the IO that has the xritag
+ * that is in the iocbq structure. The xritag is assigned during iocbq
+ * creation and persists for as long as the driver is loaded.
+ * If the caller has fewer than 256 scatter gather segments to map then
+ * pdma_phys_addr1 should be 0.
+ * If the caller needs to map more than 256 scatter gather segments then
+ * pdma_phys_addr1 should be a valid physical address.
+ * Physical addresses for SGLs must be 64 byte aligned.
+ * If two SGL pages are mapped then the first one must have 256 entries;
+ * the second can have between 1 and 256 entries.
+ *
+ * Return codes:
+ *     0 - Success
+ *     -ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_post_sgl(struct lpfc_hba *phba,
+               dma_addr_t pdma_phys_addr0,
+               dma_addr_t pdma_phys_addr1,
+               uint16_t xritag)
+{
+       struct lpfc_mbx_post_sgl_pages *post_sgl_pages;
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       if (xritag == NO_XRI) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "0364 Invalid param:\n");
+               return -EINVAL;
+       }
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                       LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES,
+                       sizeof(struct lpfc_mbx_post_sgl_pages) -
+                       sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+
+       post_sgl_pages = (struct lpfc_mbx_post_sgl_pages *)
+                               &mbox->u.mqe.un.post_sgl_pages;
+       bf_set(lpfc_post_sgl_pages_xri, post_sgl_pages, xritag);
+       bf_set(lpfc_post_sgl_pages_xricnt, post_sgl_pages, 1);
+
+       post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_lo =
+                               cpu_to_le32(putPaddrLow(pdma_phys_addr0));
+       post_sgl_pages->sgl_pg_pairs[0].sgl_pg0_addr_hi =
+                               cpu_to_le32(putPaddrHigh(pdma_phys_addr0));
+
+       post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_lo =
+                               cpu_to_le32(putPaddrLow(pdma_phys_addr1));
+       post_sgl_pages->sgl_pg_pairs[0].sgl_pg1_addr_hi =
+                               cpu_to_le32(putPaddrHigh(pdma_phys_addr1));
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) &post_sgl_pages->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2511 POST_SGL mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       }
+       return rc;
+}
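+
+/*
+ * A sketch of posting a single SGL page for an XRI per the rules above;
+ * with 256 or fewer scatter gather segments only the first page is used,
+ * so the second physical address is passed as zero. "sglq" stands in for
+ * a driver SGL queue entry carrying the page address and its xritag:
+ *
+ *     rc = lpfc_sli4_post_sgl(phba, sglq->phys, 0, sglq->sli4_xritag);
+ */
+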
+/**
+ * lpfc_sli4_remove_all_sgl_pages - Remove all sgl pages registered with HBA
+ * @phba: Pointer to the HBA context object for which this call is executed.
+ *
+ * This routine will remove all of the sgl pages registered with the hba.
+ *
+ * Return codes:
+ *     0 - Success
+ *     -ENXIO, -ENOMEM - Failure
+ **/
+int
+lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *phba)
+{
+       LPFC_MBOXQ_t *mbox;
+       int rc;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox)
+               return -ENOMEM;
+
+       lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                       LPFC_MBOX_OPCODE_FCOE_REMOVE_SGL_PAGES, 0,
+                       LPFC_SLI4_MBX_EMBED);
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *)
+               &mbox->u.mqe.un.sli4_config.header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mbox, phba->mbox_mem_pool);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2512 REMOVE_ALL_SGL_PAGES mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       }
+       return rc;
+}
+
+/**
+ * lpfc_sli4_next_xritag - Get an xritag for the io
+ * @phba: Pointer to HBA context object.
+ *
+ * This function gets an xritag for the iocb. If there is no unused xritag
+ * it will return NO_XRI (0xffff), which is not a valid xritag.
+ * Otherwise the function returns the allocated xritag.
+ * The caller is not required to hold any lock.
+ **/
+uint16_t
+lpfc_sli4_next_xritag(struct lpfc_hba *phba)
+{
+       uint16_t xritag;
+
+       spin_lock_irq(&phba->hbalock);
+       xritag = phba->sli4_hba.next_xri;
+       if ((xritag != (uint16_t) -1) && xritag <
+               (phba->sli4_hba.max_cfg_param.max_xri
+                       + phba->sli4_hba.max_cfg_param.xri_base)) {
+               phba->sli4_hba.next_xri++;
+               phba->sli4_hba.max_cfg_param.xri_used++;
+               spin_unlock_irq(&phba->hbalock);
+               return xritag;
+       }
+       spin_unlock_irq(&phba->hbalock);
+
+       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                       "2004 Failed to allocate XRI. Last XRITAG is %d"
+                       " Max XRI is %d, Used XRI is %d\n",
+                       phba->sli4_hba.next_xri,
+                       phba->sli4_hba.max_cfg_param.max_xri,
+                       phba->sli4_hba.max_cfg_param.xri_used);
+       return -1;
+}
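+
+/*
+ * A sketch of consuming an xritag, checking for exhaustion against NO_XRI:
+ *
+ *     uint16_t xri = lpfc_sli4_next_xritag(phba);
+ *
+ *     if (xri == NO_XRI)
+ *             (no XRI available; fail or defer the I/O)
+ */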
+
+/**
+ * lpfc_sli4_post_sgl_list - post a block of sgl list to the firmware.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post a block of the driver's sgl pages to the
+ * HBA using a non-embedded mailbox command. No lock is held. This routine
+ * is only called while the driver is loading, after all IO has been
+ * stopped.
+ **/
+int
+lpfc_sli4_post_sgl_list(struct lpfc_hba *phba)
+{
+       struct lpfc_sglq *sglq_entry;
+       struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+       struct sgl_page_pairs *sgl_pg_pairs;
+       void *viraddr;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t reqlen, alloclen, pg_pairs;
+       uint32_t mbox_tmo;
+       uint16_t xritag_start = 0;
+       int els_xri_cnt, rc = 0;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       /* The number of sgls to be posted */
+       els_xri_cnt = lpfc_sli4_get_els_iocb_cnt(phba);
+
+       reqlen = els_xri_cnt * sizeof(struct sgl_page_pairs) +
+                sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+       if (reqlen > PAGE_SIZE) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "2559 Block sgl registration required DMA "
+                               "size (%d) greater than a page\n", reqlen);
+               return -ENOMEM;
+       }
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2560 Failed to allocate mbox cmd memory\n");
+               return -ENOMEM;
+       }
+
+       /* Allocate DMA memory and set up the non-embedded mailbox command */
+       alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+                        LPFC_SLI4_MBX_NEMBED);
+
+       if (alloclen < reqlen) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0285 Allocated DMA memory size (%d) is "
+                               "less than the requested DMA memory "
+                               "size (%d)\n", alloclen, reqlen);
+               lpfc_sli4_mbox_cmd_free(phba, mbox);
+               return -ENOMEM;
+       }
+
+       /* Get the first SGE entry from the non-embedded DMA memory */
+       if (unlikely(!mbox->sge_array)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "2525 Failed to get the non-embedded SGE "
+                               "virtual address\n");
+               lpfc_sli4_mbox_cmd_free(phba, mbox);
+               return -ENOMEM;
+       }
+       viraddr = mbox->sge_array->addr[0];
+
+       /* Set up the SGL pages in the non-embedded DMA pages */
+       sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+       sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+       for (pg_pairs = 0; pg_pairs < els_xri_cnt; pg_pairs++) {
+               sglq_entry = phba->sli4_hba.lpfc_els_sgl_array[pg_pairs];
+               /* Set up the sge entry */
+               sgl_pg_pairs->sgl_pg0_addr_lo =
+                               cpu_to_le32(putPaddrLow(sglq_entry->phys));
+               sgl_pg_pairs->sgl_pg0_addr_hi =
+                               cpu_to_le32(putPaddrHigh(sglq_entry->phys));
+               sgl_pg_pairs->sgl_pg1_addr_lo =
+                               cpu_to_le32(putPaddrLow(0));
+               sgl_pg_pairs->sgl_pg1_addr_hi =
+                               cpu_to_le32(putPaddrHigh(0));
+               /* Keep the first xritag on the list */
+               if (pg_pairs == 0)
+                       xritag_start = sglq_entry->sli4_xritag;
+               sgl_pg_pairs++;
+       }
+       bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+       pg_pairs = (pg_pairs > 0) ? (pg_pairs - 1) : pg_pairs;
+       bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+       /* Perform endian conversion if necessary */
+       sgl->word0 = cpu_to_le32(sgl->word0);
+
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+       shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               lpfc_sli4_mbox_cmd_free(phba, mbox);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2513 POST_SGL_BLOCK mailbox command failed "
+                               "status x%x add_status x%x mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       }
+       return rc;
+}
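
The PAGE_SIZE guard above bounds how many sgl page pairs a single non-embedded mailbox can describe. As a sanity check of that arithmetic, here is a minimal userspace sketch; the struct sizes are stand-ins, not the real lpfc_hw4.h definitions:

#include <stdint.h>
#include <stdio.h>

#define SGL_PAGE_PAIR_SIZE 16u	/* assumed sizeof(struct sgl_page_pairs) */
#define CFG_SHDR_SIZE       8u	/* assumed sizeof(union lpfc_sli4_cfg_shdr) */
#define PAGE_SIZE_LOCAL  4096u

int main(void)
{
	uint32_t cnt;

	/* Find the largest count whose reqlen still fits in one page. */
	for (cnt = 1; ; cnt++) {
		uint32_t reqlen = cnt * SGL_PAGE_PAIR_SIZE +
				  CFG_SHDR_SIZE + (uint32_t)sizeof(uint32_t);

		if (reqlen > PAGE_SIZE_LOCAL) {
			printf("max sgl page pairs per mailbox: %u\n",
			       (unsigned)(cnt - 1));
			return 0;
		}
	}
}
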
+
+/**
+ * lpfc_sli4_post_scsi_sgl_block - post a block of scsi sgl list to firmware
+ * @phba: pointer to lpfc hba data structure.
+ * @sblist: pointer to scsi buffer list.
+ * @cnt: number of scsi buffers on the list.
+ *
+ * This routine is invoked to post a block of @cnt scsi sgl pages from a
+ * SCSI buffer list @sblist to the HBA using a non-embedded mailbox command.
+ * No lock is held.
+ *
+ **/
+int
+lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *phba, struct list_head *sblist,
+                             int cnt)
+{
+       struct lpfc_scsi_buf *psb;
+       struct lpfc_mbx_post_uembed_sgl_page1 *sgl;
+       struct sgl_page_pairs *sgl_pg_pairs;
+       void *viraddr;
+       LPFC_MBOXQ_t *mbox;
+       uint32_t reqlen, alloclen, pg_pairs;
+       uint32_t mbox_tmo;
+       uint16_t xritag_start = 0;
+       int rc = 0;
+       uint32_t shdr_status, shdr_add_status;
+       dma_addr_t pdma_phys_bpl1;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       /* Calculate the requested length of the dma memory */
+       reqlen = cnt * sizeof(struct sgl_page_pairs) +
+                sizeof(union lpfc_sli4_cfg_shdr) + sizeof(uint32_t);
+       if (reqlen > PAGE_SIZE) {
+               lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
+                               "0217 Block sgl registration required DMA "
+                               "size (%d) greater than a page\n", reqlen);
+               return -ENOMEM;
+       }
+       mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mbox) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0283 Failed to allocate mbox cmd memory\n");
+               return -ENOMEM;
+       }
+
+       /* Allocate DMA memory and set up the non-embedded mailbox command */
+       alloclen = lpfc_sli4_config(phba, mbox, LPFC_MBOX_SUBSYSTEM_FCOE,
+                               LPFC_MBOX_OPCODE_FCOE_POST_SGL_PAGES, reqlen,
+                               LPFC_SLI4_MBX_NEMBED);
+
+       if (alloclen < reqlen) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2561 Allocated DMA memory size (%d) is "
+                               "less than the requested DMA memory "
+                               "size (%d)\n", alloclen, reqlen);
+               lpfc_sli4_mbox_cmd_free(phba, mbox);
+               return -ENOMEM;
+       }
+
+       /* Get the first SGE entry from the non-embedded DMA memory */
+       if (unlikely(!mbox->sge_array)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "2565 Failed to get the non-embedded SGE "
+                               "virtual address\n");
+               lpfc_sli4_mbox_cmd_free(phba, mbox);
+               return -ENOMEM;
+       }
+       viraddr = mbox->sge_array->addr[0];
+
+       /* Set up the SGL pages in the non-embedded DMA pages */
+       sgl = (struct lpfc_mbx_post_uembed_sgl_page1 *)viraddr;
+       sgl_pg_pairs = &sgl->sgl_pg_pairs;
+
+       pg_pairs = 0;
+       list_for_each_entry(psb, sblist, list) {
+               /* Set up the sge entry */
+               sgl_pg_pairs->sgl_pg0_addr_lo =
+                       cpu_to_le32(putPaddrLow(psb->dma_phys_bpl));
+               sgl_pg_pairs->sgl_pg0_addr_hi =
+                       cpu_to_le32(putPaddrHigh(psb->dma_phys_bpl));
+               if (phba->cfg_sg_dma_buf_size > SGL_PAGE_SIZE)
+                       pdma_phys_bpl1 = psb->dma_phys_bpl + SGL_PAGE_SIZE;
+               else
+                       pdma_phys_bpl1 = 0;
+               sgl_pg_pairs->sgl_pg1_addr_lo =
+                       cpu_to_le32(putPaddrLow(pdma_phys_bpl1));
+               sgl_pg_pairs->sgl_pg1_addr_hi =
+                       cpu_to_le32(putPaddrHigh(pdma_phys_bpl1));
+               /* Keep the first xritag on the list */
+               if (pg_pairs == 0)
+                       xritag_start = psb->cur_iocbq.sli4_xritag;
+               sgl_pg_pairs++;
+               pg_pairs++;
+       }
+       bf_set(lpfc_post_sgl_pages_xri, sgl, xritag_start);
+       bf_set(lpfc_post_sgl_pages_xricnt, sgl, pg_pairs);
+       /* Perform endian conversion if necessary */
+       sgl->word0 = cpu_to_le32(sgl->word0);
+
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mbox, MBX_POLL);
+       else {
+               mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+               rc = lpfc_sli_issue_mbox_wait(phba, mbox, mbox_tmo);
+       }
+       shdr = (union lpfc_sli4_cfg_shdr *) &sgl->cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               lpfc_sli4_mbox_cmd_free(phba, mbox);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2564 POST_SGL_BLOCK mailbox command failed "
+                               "status x%x add_status x%x mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       }
+       return rc;
+}
+
+/**
+ * lpfc_fc_frame_check - Check that this frame is a valid frame to handle
+ * @phba: pointer to lpfc_hba struct that the frame was received on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function checks the fields in the @fc_hdr to see if the FC frame is a
+ * valid type of frame that the LPFC driver will handle. This function will
+ * return zero if the frame is a valid frame and a nonzero value when the
+ * frame does not pass the check.
+ **/
+static int
+lpfc_fc_frame_check(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr)
+{
+       char *rctl_names[] = FC_RCTL_NAMES_INIT;
+       char *type_names[] = FC_TYPE_NAMES_INIT;
+       struct fc_vft_header *fc_vft_hdr;
+
+       switch (fc_hdr->fh_r_ctl) {
+       case FC_RCTL_DD_UNCAT:          /* uncategorized information */
+       case FC_RCTL_DD_SOL_DATA:       /* solicited data */
+       case FC_RCTL_DD_UNSOL_CTL:      /* unsolicited control */
+       case FC_RCTL_DD_SOL_CTL:        /* solicited control or reply */
+       case FC_RCTL_DD_UNSOL_DATA:     /* unsolicited data */
+       case FC_RCTL_DD_DATA_DESC:      /* data descriptor */
+       case FC_RCTL_DD_UNSOL_CMD:      /* unsolicited command */
+       case FC_RCTL_DD_CMD_STATUS:     /* command status */
+       case FC_RCTL_ELS_REQ:   /* extended link services request */
+       case FC_RCTL_ELS_REP:   /* extended link services reply */
+       case FC_RCTL_ELS4_REQ:  /* FC-4 ELS request */
+       case FC_RCTL_ELS4_REP:  /* FC-4 ELS reply */
+       case FC_RCTL_BA_NOP:    /* basic link service NOP */
+       case FC_RCTL_BA_ABTS:   /* basic link service abort */
+       case FC_RCTL_BA_RMC:    /* remove connection */
+       case FC_RCTL_BA_ACC:    /* basic accept */
+       case FC_RCTL_BA_RJT:    /* basic reject */
+       case FC_RCTL_BA_PRMT:
+       case FC_RCTL_ACK_1:     /* acknowledge_1 */
+       case FC_RCTL_ACK_0:     /* acknowledge_0 */
+       case FC_RCTL_P_RJT:     /* port reject */
+       case FC_RCTL_F_RJT:     /* fabric reject */
+       case FC_RCTL_P_BSY:     /* port busy */
+       case FC_RCTL_F_BSY:     /* fabric busy to data frame */
+       case FC_RCTL_F_BSYL:    /* fabric busy to link control frame */
+       case FC_RCTL_LCR:       /* link credit reset */
+       case FC_RCTL_END:       /* end */
+               break;
+       case FC_RCTL_VFTH:      /* Virtual Fabric tagging Header */
+               fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+               fc_hdr = &((struct fc_frame_header *)fc_vft_hdr)[1];
+               return lpfc_fc_frame_check(phba, fc_hdr);
+       default:
+               goto drop;
+       }
+       switch (fc_hdr->fh_type) {
+       case FC_TYPE_BLS:
+       case FC_TYPE_ELS:
+       case FC_TYPE_FCP:
+       case FC_TYPE_CT:
+               break;
+       case FC_TYPE_IP:
+       case FC_TYPE_ILS:
+       default:
+               goto drop;
+       }
+       lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
+                       "2538 Received frame rctl:%s type:%s\n",
+                       rctl_names[fc_hdr->fh_r_ctl],
+                       type_names[fc_hdr->fh_type]);
+       return 0;
+drop:
+       lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
+                       "2539 Dropped frame rctl:%s type:%s\n",
+                       rctl_names[fc_hdr->fh_r_ctl],
+                       type_names[fc_hdr->fh_type]);
+       return 1;
+}
+
+/**
+ * lpfc_fc_hdr_get_vfi - Get the VFI from an FC frame
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ *
+ * This function processes the FC header to retrieve the VFI from the VF
+ * header, if one exists. It returns the VFI if a Virtual Fabric Tagging
+ * Header is present, or 0 otherwise.
+ **/
+static uint32_t
+lpfc_fc_hdr_get_vfi(struct fc_frame_header *fc_hdr)
+{
+       struct fc_vft_header *fc_vft_hdr = (struct fc_vft_header *)fc_hdr;
+
+       if (fc_hdr->fh_r_ctl != FC_RCTL_VFTH)
+               return 0;
+       return bf_get(fc_vft_hdr_vf_id, fc_vft_hdr);
+}
+
+/**
+ * lpfc_fc_frame_to_vport - Finds the vport that a frame is destined to
+ * @phba: Pointer to the HBA structure to search for the vport on
+ * @fc_hdr: A pointer to the FC Header data (In Big Endian Format)
+ * @fcfi: The FC Fabric ID that the frame came from
+ *
+ * This function searches the @phba for a vport that matches the content of the
+ * @fc_hdr passed in and the @fcfi. This function uses the @fc_hdr to fetch the
+ * VFI, if the Virtual Fabric Tagging Header exists, and the DID. This function
+ * returns the matching vport pointer or NULL if unable to match frame to a
+ * vport.
+ **/
+static struct lpfc_vport *
+lpfc_fc_frame_to_vport(struct lpfc_hba *phba, struct fc_frame_header *fc_hdr,
+                      uint16_t fcfi)
+{
+       struct lpfc_vport **vports;
+       struct lpfc_vport *vport = NULL;
+       int i;
+       uint32_t did = (fc_hdr->fh_d_id[0] << 16 |
+                       fc_hdr->fh_d_id[1] << 8 |
+                       fc_hdr->fh_d_id[2]);
+
+       vports = lpfc_create_vport_work_array(phba);
+       if (vports != NULL)
+               for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
+                       if (phba->fcf.fcfi == fcfi &&
+                           vports[i]->vfi == lpfc_fc_hdr_get_vfi(fc_hdr) &&
+                           vports[i]->fc_myDID == did) {
+                               vport = vports[i];
+                               break;
+                       }
+               }
+       lpfc_destroy_vport_work_array(phba, vports);
+       return vport;
+}
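
The DID computation above is plain 24-bit big-endian byte packing; the same math recurs for the S_ID in lpfc_prep_seq below. A standalone sketch:

#include <stdint.h>
#include <stdio.h>

/* Assemble a 24-bit FC address (D_ID/S_ID) from its three header bytes,
 * exactly as lpfc_fc_frame_to_vport and lpfc_prep_seq do. */
static uint32_t fc_addr(const uint8_t id[3])
{
	return (uint32_t)id[0] << 16 | (uint32_t)id[1] << 8 | id[2];
}

int main(void)
{
	uint8_t d_id[3] = { 0xFF, 0xFF, 0xFE };	/* well-known fabric login DID */

	printf("did = 0x%06X\n", (unsigned)fc_addr(d_id));	/* 0xFFFFFE */
	return 0;
}
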
+
+/**
+ * lpfc_fc_frame_add - Adds a frame to the vport's list of received sequences
+ * @vport: pointer to the vport that the frame was received on
+ * @dmabuf: pointer to a dmabuf that describes the hdr and data of the FC frame
+ *
+ * This function searches through the existing incomplete sequences that have
+ * been sent to this @vport. If the frame matches one of the incomplete
+ * sequences then the dbuf in the @dmabuf is added to the list of frames that
+ * make up that sequence. If no sequence is found that matches this frame then
+ * the function will add the hbuf in the @dmabuf to the @vport's
+ * rcv_buffer_list. This function returns a pointer to the first dmabuf in the
+ * sequence list that the frame was linked to.
+ **/
+static struct hbq_dmabuf *
+lpfc_fc_frame_add(struct lpfc_vport *vport, struct hbq_dmabuf *dmabuf)
+{
+       struct fc_frame_header *new_hdr;
+       struct fc_frame_header *temp_hdr;
+       struct lpfc_dmabuf *d_buf;
+       struct lpfc_dmabuf *h_buf;
+       struct hbq_dmabuf *seq_dmabuf = NULL;
+       struct hbq_dmabuf *temp_dmabuf = NULL;
+
+       new_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+       /* Use the hdr_buf to find the sequence that this frame belongs to */
+       list_for_each_entry(h_buf, &vport->rcv_buffer_list, list) {
+               temp_hdr = (struct fc_frame_header *)h_buf->virt;
+               if ((temp_hdr->fh_seq_id != new_hdr->fh_seq_id) ||
+                   (temp_hdr->fh_ox_id != new_hdr->fh_ox_id) ||
+                   (memcmp(&temp_hdr->fh_s_id, &new_hdr->fh_s_id, 3)))
+                       continue;
+               /* found a pending sequence that matches this frame */
+               seq_dmabuf = container_of(h_buf, struct hbq_dmabuf, hbuf);
+               break;
+       }
+       if (!seq_dmabuf) {
+               /*
+                * This indicates first frame received for this sequence.
+                * Queue the buffer on the vport's rcv_buffer_list.
+                */
+               list_add_tail(&dmabuf->hbuf.list, &vport->rcv_buffer_list);
+               return dmabuf;
+       }
+       temp_hdr = seq_dmabuf->hbuf.virt;
+       if (new_hdr->fh_seq_cnt < temp_hdr->fh_seq_cnt) {
+               list_add(&seq_dmabuf->dbuf.list, &dmabuf->dbuf.list);
+               return dmabuf;
+       }
+       /* find the correct place in the sequence to insert this frame */
+       list_for_each_entry_reverse(d_buf, &seq_dmabuf->dbuf.list, list) {
+               temp_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+               temp_hdr = (struct fc_frame_header *)temp_dmabuf->hbuf.virt;
+               /*
+                * If the frame's sequence count is greater than the frame on
+                * the list then insert the frame right after this frame
+                */
+               if (new_hdr->fh_seq_cnt > temp_hdr->fh_seq_cnt) {
+                       list_add(&dmabuf->dbuf.list, &temp_dmabuf->dbuf.list);
+                       return seq_dmabuf;
+               }
+       }
+       return NULL;
+}
+
+/**
+ * lpfc_seq_complete - Indicates if a sequence is complete
+ * @dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function checks the sequence, starting with the frame described by
+ * @dmabuf, to see if all the frames associated with this sequence are present.
+ * The frames associated with this sequence are linked to the @dmabuf using the
+ * dbuf list. This function looks for three things: 1) that the first frame
+ * has a sequence count of zero; 2) that a frame with the last-frame-of-sequence
+ * bit set is present; and 3) that there are no holes in the sequence count.
+ * The function returns 1 when the sequence is complete, otherwise it returns 0.
+ **/
+static int
+lpfc_seq_complete(struct hbq_dmabuf *dmabuf)
+{
+       struct fc_frame_header *hdr;
+       struct lpfc_dmabuf *d_buf;
+       struct hbq_dmabuf *seq_dmabuf;
+       uint32_t fctl;
+       int seq_count = 0;
+
+       hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+       /* make sure first frame of sequence has a sequence count of zero */
+       if (hdr->fh_seq_cnt != seq_count)
+               return 0;
+       fctl = (hdr->fh_f_ctl[0] << 16 |
+               hdr->fh_f_ctl[1] << 8 |
+               hdr->fh_f_ctl[2]);
+       /* If last frame of sequence we can return success. */
+       if (fctl & FC_FC_END_SEQ)
+               return 1;
+       list_for_each_entry(d_buf, &dmabuf->dbuf.list, list) {
+               seq_dmabuf = container_of(d_buf, struct hbq_dmabuf, dbuf);
+               hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+               /* If there is a hole in the sequence count then fail. */
+               if (++seq_count != hdr->fh_seq_cnt)
+                       return 0;
+               fctl = (hdr->fh_f_ctl[0] << 16 |
+                       hdr->fh_f_ctl[1] << 8 |
+                       hdr->fh_f_ctl[2]);
+               /* If last frame of sequence we can return success. */
+               if (fctl & FC_FC_END_SEQ)
+                       return 1;
+       }
+       return 0;
+}
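
The completeness test above rebuilds the 24-bit F_CTL field from its three header bytes and checks the end-of-sequence bit. A standalone sketch; FC_FC_END_SEQ is taken as bit 19 per the libfc fc_fs.h definition, so treat the value as an assumption here:

#include <stdint.h>
#include <stdio.h>

#define FC_FC_END_SEQ (1 << 19)	/* last frame of sequence; value assumed */

/* Rebuild the 24-bit F_CTL field from fh_f_ctl[3], as lpfc_seq_complete does. */
static int frame_ends_sequence(const uint8_t f_ctl[3])
{
	uint32_t fctl = (uint32_t)f_ctl[0] << 16 |
			(uint32_t)f_ctl[1] << 8 | f_ctl[2];

	return (fctl & FC_FC_END_SEQ) != 0;
}

int main(void)
{
	uint8_t last[3]  = { 0x09, 0x80, 0x00 };	/* END_SEQ bit set */
	uint8_t inner[3] = { 0x01, 0x00, 0x00 };	/* mid-sequence frame */

	printf("%d %d\n", frame_ends_sequence(last), frame_ends_sequence(inner));
	return 0;
}
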
+
+/**
+ * lpfc_prep_seq - Prep sequence for ULP processing
+ * @vport: Pointer to the vport on which this sequence was received
+ * @seq_dmabuf: pointer to a dmabuf that describes the FC sequence
+ *
+ * This function takes a sequence, described by a list of frames, and creates
+ * a list of iocbq structures to describe the sequence. This iocbq list will be
+ * used to issue to the generic unsolicited sequence handler. This routine
+ * returns a pointer to the first iocbq in the list. If the function is unable
+ * to allocate an iocbq then it throws out the received frames that could not
+ * be described and returns a pointer to the first iocbq. If unable to
+ * allocate any iocbqs (including the first) this function will return NULL.
+ **/
+static struct lpfc_iocbq *
+lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+{
+       struct lpfc_dmabuf *d_buf, *n_buf;
+       struct lpfc_iocbq *first_iocbq, *iocbq;
+       struct fc_frame_header *fc_hdr;
+       uint32_t sid;
+
+       fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+       /* remove from receive buffer list */
+       list_del_init(&seq_dmabuf->hbuf.list);
+       /* get the Remote Port's SID */
+       sid = (fc_hdr->fh_s_id[0] << 16 |
+              fc_hdr->fh_s_id[1] << 8 |
+              fc_hdr->fh_s_id[2]);
+       /* Get an iocbq struct to fill in. */
+       first_iocbq = lpfc_sli_get_iocbq(vport->phba);
+       if (first_iocbq) {
+               /* Initialize the first IOCB. */
+               first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
+               first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
+               first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
+               first_iocbq->iocb.unsli3.rcvsli3.vpi =
+                                       vport->vpi + vport->phba->vpi_base;
+               /* put the first buffer into the first IOCBq */
+               first_iocbq->context2 = &seq_dmabuf->dbuf;
+               first_iocbq->context3 = NULL;
+               first_iocbq->iocb.ulpBdeCount = 1;
+               first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+                                                       LPFC_DATA_BUF_SIZE;
+               first_iocbq->iocb.un.rcvels.remoteID = sid;
+       }
+       iocbq = first_iocbq;
+       /*
+        * Each IOCBq can have two Buffers assigned, so go through the list
+        * of buffers for this sequence and save two buffers in each IOCBq
+        */
+       list_for_each_entry_safe(d_buf, n_buf, &seq_dmabuf->dbuf.list, list) {
+               if (!iocbq) {
+                       lpfc_in_buf_free(vport->phba, d_buf);
+                       continue;
+               }
+               if (!iocbq->context3) {
+                       iocbq->context3 = d_buf;
+                       iocbq->iocb.ulpBdeCount++;
+                       iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
+                                                       LPFC_DATA_BUF_SIZE;
+               } else {
+                       iocbq = lpfc_sli_get_iocbq(vport->phba);
+                       if (!iocbq) {
+                               if (first_iocbq) {
+                                       first_iocbq->iocb.ulpStatus =
+                                                       IOSTAT_FCP_RSP_ERROR;
+                                       first_iocbq->iocb.un.ulpWord[4] =
+                                                       IOERR_NO_RESOURCES;
+                               }
+                               lpfc_in_buf_free(vport->phba, d_buf);
+                               continue;
+                       }
+                       iocbq->context2 = d_buf;
+                       iocbq->context3 = NULL;
+                       iocbq->iocb.ulpBdeCount = 1;
+                       iocbq->iocb.un.cont64[0].tus.f.bdeSize =
+                                                       LPFC_DATA_BUF_SIZE;
+                       iocbq->iocb.un.rcvels.remoteID = sid;
+                       list_add_tail(&iocbq->list, &first_iocbq->list);
+               }
+       }
+       return first_iocbq;
+}
+
+/**
+ * lpfc_sli4_handle_received_buffer - Handle received buffers from firmware
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called with no lock held. It processes all the received
+ * buffers and hands them to the upper layer when a received buffer indicates
+ * that it is the final frame in the sequence. The interrupt service routine
+ * processes received buffers at interrupt context, adds received dma buffers
+ * to the rb_pend_list queue, and signals the worker thread. The worker thread
+ * calls lpfc_sli4_handle_received_buffer, which invokes the appropriate
+ * receive function when the final frame in a sequence is received.
+ **/
+int
+lpfc_sli4_handle_received_buffer(struct lpfc_hba *phba)
+{
+       LIST_HEAD(cmplq);
+       struct hbq_dmabuf *dmabuf, *seq_dmabuf;
+       struct fc_frame_header *fc_hdr;
+       struct lpfc_vport *vport;
+       uint32_t fcfi;
+       struct lpfc_iocbq *iocbq;
+
+       /* Clear hba flag and get all received buffers into the cmplq */
+       spin_lock_irq(&phba->hbalock);
+       phba->hba_flag &= ~HBA_RECEIVE_BUFFER;
+       list_splice_init(&phba->rb_pend_list, &cmplq);
+       spin_unlock_irq(&phba->hbalock);
+
+       /* Process each received buffer */
+       while ((dmabuf = lpfc_sli_hbqbuf_get(&cmplq)) != NULL) {
+               fc_hdr = (struct fc_frame_header *)dmabuf->hbuf.virt;
+               /* check to see if this a valid type of frame */
+               if (lpfc_fc_frame_check(phba, fc_hdr)) {
+                       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+                       continue;
+               }
+               fcfi = bf_get(lpfc_rcqe_fcf_id, &dmabuf->rcqe);
+               vport = lpfc_fc_frame_to_vport(phba, fc_hdr, fcfi);
+               if (!vport) {
+                       /* throw out the frame */
+                       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+                       continue;
+               }
+               /* Link this frame */
+               seq_dmabuf = lpfc_fc_frame_add(vport, dmabuf);
+               if (!seq_dmabuf) {
+                       /* unable to add frame to vport - throw it out */
+                       lpfc_in_buf_free(phba, &dmabuf->dbuf);
+                       continue;
+               }
+               /* If not last frame in sequence continue processing frames. */
+               if (!lpfc_seq_complete(seq_dmabuf)) {
+                       /*
+                        * When saving off frames, post a replacement buffer
+                        * and mark this frame to be freed when processing of
+                        * the sequence is finished.
+                        */
+                       lpfc_sli_hbqbuf_fill_hbqs(phba, LPFC_ELS_HBQ, 1);
+                       dmabuf->tag = -1;
+                       continue;
+               }
+               fc_hdr = (struct fc_frame_header *)seq_dmabuf->hbuf.virt;
+               iocbq = lpfc_prep_seq(vport, seq_dmabuf);
+               if (!lpfc_complete_unsol_iocb(phba,
+                                             &phba->sli.ring[LPFC_ELS_RING],
+                                             iocbq, fc_hdr->fh_r_ctl,
+                                             fc_hdr->fh_type))
+                       lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
+                                       "2540 Ring %d handler: unexpected Rctl "
+                                       "x%x Type x%x received\n",
+                                       LPFC_ELS_RING,
+                                       fc_hdr->fh_r_ctl, fc_hdr->fh_type);
+       }
+       return 0;
+}
+
+/**
+ * lpfc_sli4_post_all_rpi_hdrs - Post the rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to post rpi header templates to the
+ * HBA consistent with the SLI-4 interface spec.  This routine
+ * posts a PAGE_SIZE memory region to the port to hold up to
+ * PAGE_SIZE / 64 rpi context headers.
+ *
+ * This routine does not require any locks.  Its use is expected
+ * to be driver load or reset recovery, when driver execution is
+ * sequential.
+ *
+ * Return codes
+ *     0 - successful
+ *      EIO - The mailbox failed to complete successfully.
+ *     When this error occurs, the driver is not guaranteed
+ *     to have any rpi regions posted to the device and
+ *     must either attempt to repost the regions or take a
+ *     fatal error.
+ **/
+int
+lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *phba)
+{
+       struct lpfc_rpi_hdr *rpi_page;
+       uint32_t rc = 0;
+
+       /* Post all rpi memory regions to the port. */
+       list_for_each_entry(rpi_page, &phba->sli4_hba.lpfc_rpi_hdr_list, list) {
+               rc = lpfc_sli4_post_rpi_hdr(phba, rpi_page);
+               if (rc != MBX_SUCCESS) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2008 Error %d posting all rpi "
+                                       "headers\n", rc);
+                       rc = -EIO;
+                       break;
+               }
+       }
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_post_rpi_hdr - Post an rpi header memory region to the port
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi_page:  pointer to the rpi memory region.
+ *
+ * This routine is invoked to post a single rpi header to the
+ * HBA consistent with the SLI-4 interface spec.  This memory region
+ * maps up to 64 rpi context regions.
+ *
+ * Return codes
+ *     0 - successful
+ *     ENOMEM - No available memory
+ *      EIO - The mailbox failed to complete successfully.
+ **/
+int
+lpfc_sli4_post_rpi_hdr(struct lpfc_hba *phba, struct lpfc_rpi_hdr *rpi_page)
+{
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_mbx_post_hdr_tmpl *hdr_tmpl;
+       uint32_t rc = 0;
+       uint32_t mbox_tmo;
+       uint32_t shdr_status, shdr_add_status;
+       union lpfc_sli4_cfg_shdr *shdr;
+
+       /* The port is notified of the header region via a mailbox command. */
+       mboxq = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2001 Unable to allocate memory for issuing "
+                               "SLI_CONFIG_SPECIAL mailbox command\n");
+               return -ENOMEM;
+       }
+
+       /* Post this rpi header region to the port. */
+       hdr_tmpl = &mboxq->u.mqe.un.hdr_tmpl;
+       mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_SLI4_CONFIG);
+       lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_POST_HDR_TEMPLATE,
+                        sizeof(struct lpfc_mbx_post_hdr_tmpl) -
+                        sizeof(struct mbox_header), LPFC_SLI4_MBX_EMBED);
+       bf_set(lpfc_mbx_post_hdr_tmpl_page_cnt,
+              hdr_tmpl, rpi_page->page_count);
+       bf_set(lpfc_mbx_post_hdr_tmpl_rpi_offset, hdr_tmpl,
+              rpi_page->start_rpi);
+       hdr_tmpl->rpi_paddr_lo = putPaddrLow(rpi_page->dmabuf->phys);
+       hdr_tmpl->rpi_paddr_hi = putPaddrHigh(rpi_page->dmabuf->phys);
+       if (!phba->sli4_hba.intr_enable)
+               rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
+       else
+               rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+       shdr = (union lpfc_sli4_cfg_shdr *) &hdr_tmpl->header.cfg_shdr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+       if (shdr_status || shdr_add_status || rc) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2514 POST_RPI_HDR mailbox failed with "
+                               "status x%x add_status x%x, mbx status x%x\n",
+                               shdr_status, shdr_add_status, rc);
+               rc = -ENXIO;
+       }
+       return rc;
+}
+
+/**
+ * lpfc_sli4_alloc_rpi - Get an available rpi in the device's range
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to allocate an rpi from the range
+ * reported by the port.  If the pool of remaining rpis falls
+ * below the low-water mark, another rpi header page is created
+ * and posted so the port can grow the available range.
+ *
+ * Returns
+ *     A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
+ *     LPFC_RPI_ALLOC_ERROR if no rpis are available.
+ **/
+int
+lpfc_sli4_alloc_rpi(struct lpfc_hba *phba)
+{
+       int rpi;
+       uint16_t max_rpi, rpi_base, rpi_limit;
+       uint16_t rpi_remaining;
+       struct lpfc_rpi_hdr *rpi_hdr;
+
+       max_rpi = phba->sli4_hba.max_cfg_param.max_rpi;
+       rpi_base = phba->sli4_hba.max_cfg_param.rpi_base;
+       rpi_limit = phba->sli4_hba.next_rpi;
+
+       /*
+        * The valid rpi range is not guaranteed to be zero-based.  Start
+        * the search at the rpi_base as reported by the port.
+        */
+       spin_lock_irq(&phba->hbalock);
+       rpi = find_next_zero_bit(phba->sli4_hba.rpi_bmask, rpi_limit, rpi_base);
+       if (rpi >= rpi_limit || rpi < rpi_base)
+               rpi = LPFC_RPI_ALLOC_ERROR;
+       else {
+               set_bit(rpi, phba->sli4_hba.rpi_bmask);
+               phba->sli4_hba.max_cfg_param.rpi_used++;
+               phba->sli4_hba.rpi_count++;
+       }
+
+       /*
+        * Don't try to allocate more rpi header regions if the device limit
+        * on available rpis max has been exhausted.
+        */
+       if ((rpi == LPFC_RPI_ALLOC_ERROR) &&
+           (phba->sli4_hba.rpi_count >= max_rpi)) {
+               spin_unlock_irq(&phba->hbalock);
+               return rpi;
+       }
+
+       /*
+        * If the driver is running low on rpi resources, allocate another
+        * page now.  Note that the next_rpi value is used because
+        * it represents how many rpis are actually in use, whereas
+        * max_rpi notes the maximum number supported by the device.
+        */
+       rpi_remaining = phba->sli4_hba.next_rpi - rpi_base -
+               phba->sli4_hba.rpi_count;
+       spin_unlock_irq(&phba->hbalock);
+       if (rpi_remaining < LPFC_RPI_LOW_WATER_MARK) {
+               rpi_hdr = lpfc_sli4_create_rpi_hdr(phba);
+               if (!rpi_hdr) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                                       "2002 Error Could not grow rpi "
+                                       "count\n");
+               } else {
+                       lpfc_sli4_post_rpi_hdr(phba, rpi_hdr);
+               }
+       }
+
+       return rpi;
+}
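
The allocator above is a first-fit search of the rpi bitmask starting at rpi_base, paired with lpfc_sli4_free_rpi below. A userspace sketch of the same idea, with plain-C stand-ins for find_next_zero_bit/set_bit/clear_bit (the base and limit are illustrative, not the port's real limits):

#include <stdint.h>
#include <stdio.h>

#define RPI_BASE     8		/* illustrative; the port reports the real base */
#define RPI_LIMIT   64		/* illustrative next_rpi limit */
#define ALLOC_ERROR 0xFFFF	/* stands in for LPFC_RPI_ALLOC_ERROR */

static uint8_t bmask[RPI_LIMIT / 8];

/* First-fit search from the base, mirroring find_next_zero_bit + set_bit. */
static unsigned alloc_rpi(void)
{
	unsigned rpi;

	for (rpi = RPI_BASE; rpi < RPI_LIMIT; rpi++) {
		if (!(bmask[rpi / 8] & (1u << (rpi % 8)))) {
			bmask[rpi / 8] |= 1u << (rpi % 8);
			return rpi;
		}
	}
	return ALLOC_ERROR;
}

/* Mirror of the clear_bit in lpfc_sli4_free_rpi. */
static void free_rpi(unsigned rpi)
{
	bmask[rpi / 8] &= ~(1u << (rpi % 8));
}

int main(void)
{
	unsigned a = alloc_rpi(), b = alloc_rpi();

	free_rpi(a);
	printf("%u %u %u\n", a, b, alloc_rpi());	/* prints: 8 9 8 */
	return 0;
}
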
+
+/**
+ * lpfc_sli4_free_rpi - Release an rpi for reuse.
+ * @phba: pointer to lpfc hba data structure.
+ * @rpi: the rpi to return to the available pool.
+ *
+ * This routine is invoked to release an rpi to the pool of
+ * available rpis maintained by the driver.
+ **/
+void
+lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
+{
+       spin_lock_irq(&phba->hbalock);
+       clear_bit(rpi, phba->sli4_hba.rpi_bmask);
+       phba->sli4_hba.rpi_count--;
+       phba->sli4_hba.max_cfg_param.rpi_used--;
+       spin_unlock_irq(&phba->hbalock);
+}
+
+/**
+ * lpfc_sli4_remove_rpis - Remove the rpi bitmask region
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is invoked to free the rpi bitmask region used
+ * to track allocated rpis.
+ **/
+void
+lpfc_sli4_remove_rpis(struct lpfc_hba *phba)
+{
+       kfree(phba->sli4_hba.rpi_bmask);
+}
+
+/**
+ * lpfc_sli4_resume_rpi - Resume an rpi with the port
+ * @ndlp: pointer to the remote node whose rpi is to be resumed.
+ *
+ * This routine is invoked to issue a RESUME_RPI mailbox command
+ * to the port for the rpi associated with @ndlp.
+ **/
+int
+lpfc_sli4_resume_rpi(struct lpfc_nodelist *ndlp)
+{
+       LPFC_MBOXQ_t *mboxq;
+       struct lpfc_hba *phba = ndlp->phba;
+       int rc;
+
+       /* Allocate a mailbox to carry the RESUME_RPI command. */
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+
+       /* Build and issue the RESUME_RPI mailbox command. */
+       lpfc_resume_rpi(mboxq, ndlp);
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2010 Resume RPI Mailbox failed "
+                               "status %d, mbxStatus x%x\n", rc,
+                               bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+               mempool_free(mboxq, phba->mbox_mem_pool);
+               return -EIO;
+       }
+       return 0;
+}
+
+/**
+ * lpfc_sli4_init_vpi - Initialize a vpi with the port
+ * @phba: pointer to lpfc hba data structure.
+ * @vpi: vpi value to activate with the port.
+ *
+ * This routine is invoked to activate a vpi with the
+ * port when the host intends to use vports with a
+ * nonzero vpi.
+ *
+ * Returns:
+ *    0 success
+ *    negative errno otherwise (-EINVAL, -ENOMEM, or -EIO)
+ **/
+int
+lpfc_sli4_init_vpi(struct lpfc_hba *phba, uint16_t vpi)
+{
+       LPFC_MBOXQ_t *mboxq;
+       int rc = 0;
+       uint32_t mbox_tmo;
+
+       if (vpi == 0)
+               return -EINVAL;
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq)
+               return -ENOMEM;
+       lpfc_init_vpi(mboxq, vpi);
+       mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
+       rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
+       if (rc != MBX_TIMEOUT)
+               mempool_free(mboxq, phba->mbox_mem_pool);
+       if (rc != MBX_SUCCESS) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+                               "2022 INIT VPI Mailbox failed "
+                               "status %d, mbxStatus x%x\n", rc,
+                               bf_get(lpfc_mqe_status, &mboxq->u.mqe));
+               rc = -EIO;
+       }
+       return rc;
+}
+
+/**
+ * lpfc_mbx_cmpl_add_fcf_record - add fcf mbox completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: Pointer to mailbox object.
+ *
+ * This routine is the completion handler for the ADD_FCF_RECORD
+ * mailbox command.  It checks the mailbox subheader status and, when
+ * done, frees the nonembedded mailbox resources.
+ **/
+static void
+lpfc_mbx_cmpl_add_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+       void *virt_addr;
+       union lpfc_sli4_cfg_shdr *shdr;
+       uint32_t shdr_status, shdr_add_status;
+
+       virt_addr = mboxq->sge_array->addr[0];
+       /* The IOCTL status is embedded in the mailbox subheader. */
+       shdr = (union lpfc_sli4_cfg_shdr *) virt_addr;
+       shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
+       shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
+
+       if ((shdr_status || shdr_add_status) &&
+               (shdr_status != STATUS_FCF_IN_USE))
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2558 ADD_FCF_RECORD mailbox failed with "
+                       "status x%x add_status x%x\n",
+                       shdr_status, shdr_add_status);
+
+       lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
+ * lpfc_sli4_add_fcf_record - Manually add an FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the initialized fcf record to add.
+ *
+ * This routine is invoked to manually add a single FCF record. The caller
+ * must pass a completely initialized FCF_Record.  This routine takes
+ * care of the nonembedded mailbox operations.
+ **/
+int
+lpfc_sli4_add_fcf_record(struct lpfc_hba *phba, struct fcf_record *fcf_record)
+{
+       int rc = 0;
+       LPFC_MBOXQ_t *mboxq;
+       uint8_t *bytep;
+       void *virt_addr;
+       dma_addr_t phys_addr;
+       struct lpfc_mbx_sge sge;
+       uint32_t alloc_len, req_len;
+       uint32_t fcfindex;
+
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2009 Failed to allocate mbox for ADD_FCF cmd\n");
+               return -ENOMEM;
+       }
+
+       req_len = sizeof(struct fcf_record) + sizeof(union lpfc_sli4_cfg_shdr) +
+                 sizeof(uint32_t);
+
+       /* Allocate DMA memory and set up the non-embedded mailbox command */
+       alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                                    LPFC_MBOX_OPCODE_FCOE_ADD_FCF,
+                                    req_len, LPFC_SLI4_MBX_NEMBED);
+       if (alloc_len < req_len) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2523 Allocated DMA memory size (x%x) is "
+                       "less than the requested DMA memory "
+                       "size (x%x)\n", alloc_len, req_len);
+               lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               return -ENOMEM;
+       }
+
+       /*
+        * Get the first SGE entry from the non-embedded DMA memory.  This
+        * routine only uses a single SGE.
+        */
+       lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+       phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+       if (unlikely(!mboxq->sge_array)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "2526 Failed to get the non-embedded SGE "
+                               "virtual address\n");
+               lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               return -ENOMEM;
+       }
+       virt_addr = mboxq->sge_array->addr[0];
+       /*
+        * Configure the FCF record for FCFI 0.  This is the driver's
+        * hardcoded default and gets used in nonFIP mode.
+        */
+       fcfindex = bf_get(lpfc_fcf_record_fcf_index, fcf_record);
+       bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+       lpfc_sli_pcimem_bcopy(&fcfindex, bytep, sizeof(uint32_t));
+
+       /*
+        * Copy the fcf_index and the FCF Record Data. The data starts after
+        * the FCoE header plus word10. The data copy needs to be endian
+        * correct.
+        */
+       bytep += sizeof(uint32_t);
+       lpfc_sli_pcimem_bcopy(fcf_record, bytep, sizeof(struct fcf_record));
+       mboxq->vport = phba->pport;
+       mboxq->mbox_cmpl = lpfc_mbx_cmpl_add_fcf_record;
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                       "2515 ADD_FCF_RECORD mailbox failed with "
+                       "status 0x%x\n", rc);
+               lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               rc = -EIO;
+       } else
+               rc = 0;
+
+       return rc;
+}
+
+/**
+ * lpfc_sli4_build_dflt_fcf_record - Build the driver's default FCF Record.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record:  pointer to the fcf record to write the default data.
+ * @fcf_index: FCF table entry index.
+ *
+ * This routine is invoked to build the driver's default FCF record.  The
+ * values used are hardcoded.  This routine handles memory initialization.
+ *
+ **/
+void
+lpfc_sli4_build_dflt_fcf_record(struct lpfc_hba *phba,
+                               struct fcf_record *fcf_record,
+                               uint16_t fcf_index)
+{
+       memset(fcf_record, 0, sizeof(struct fcf_record));
+       fcf_record->max_rcv_size = LPFC_FCOE_MAX_RCV_SIZE;
+       fcf_record->fka_adv_period = LPFC_FCOE_FKA_ADV_PER;
+       fcf_record->fip_priority = LPFC_FCOE_FIP_PRIORITY;
+       bf_set(lpfc_fcf_record_mac_0, fcf_record, phba->fc_map[0]);
+       bf_set(lpfc_fcf_record_mac_1, fcf_record, phba->fc_map[1]);
+       bf_set(lpfc_fcf_record_mac_2, fcf_record, phba->fc_map[2]);
+       bf_set(lpfc_fcf_record_mac_3, fcf_record, LPFC_FCOE_FCF_MAC3);
+       bf_set(lpfc_fcf_record_mac_4, fcf_record, LPFC_FCOE_FCF_MAC4);
+       bf_set(lpfc_fcf_record_mac_5, fcf_record, LPFC_FCOE_FCF_MAC5);
+       bf_set(lpfc_fcf_record_fc_map_0, fcf_record, phba->fc_map[0]);
+       bf_set(lpfc_fcf_record_fc_map_1, fcf_record, phba->fc_map[1]);
+       bf_set(lpfc_fcf_record_fc_map_2, fcf_record, phba->fc_map[2]);
+       bf_set(lpfc_fcf_record_fcf_valid, fcf_record, 1);
+       bf_set(lpfc_fcf_record_fcf_index, fcf_record, fcf_index);
+       bf_set(lpfc_fcf_record_mac_addr_prov, fcf_record,
+               LPFC_FCF_FPMA | LPFC_FCF_SPMA);
+       /* Set the VLAN bit map */
+       if (phba->valid_vlan) {
+               fcf_record->vlan_bitmap[phba->vlan_id / 8]
+                       = 1 << (phba->vlan_id % 8);
+       }
+}
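
The VLAN bitmap write above records vlan_id as a single bit in a byte array. A standalone check of that indexing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t vlan_bitmap[512] = { 0 };	/* 4096 possible VLAN IDs / 8 */
	unsigned vlan_id = 101;

	vlan_bitmap[vlan_id / 8] = 1 << (vlan_id % 8);
	printf("byte %u = 0x%02X\n", vlan_id / 8,
	       (unsigned)vlan_bitmap[vlan_id / 8]);	/* byte 12 = 0x20 */
	return 0;
}
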
+
+/**
+ * lpfc_sli4_read_fcf_record - Read an FCF record from the port.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read the FCF record from the
+ * device starting at the given @fcf_index.
+ **/
+int
+lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+       int rc = 0, error;
+       LPFC_MBOXQ_t *mboxq;
+       void *virt_addr;
+       dma_addr_t phys_addr;
+       uint8_t *bytep;
+       struct lpfc_mbx_sge sge;
+       uint32_t alloc_len, req_len;
+       struct lpfc_mbx_read_fcf_tbl *read_fcf;
+
+       mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+       if (!mboxq) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "2000 Failed to allocate mbox for "
+                               "READ_FCF cmd\n");
+               return -ENOMEM;
+       }
+
+       req_len = sizeof(struct fcf_record) +
+                 sizeof(union lpfc_sli4_cfg_shdr) + 2 * sizeof(uint32_t);
+
+       /* Set up READ_FCF SLI4_CONFIG mailbox-ioctl command */
+       alloc_len = lpfc_sli4_config(phba, mboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
+                        LPFC_MBOX_OPCODE_FCOE_READ_FCF_TABLE, req_len,
+                        LPFC_SLI4_MBX_NEMBED);
+
+       if (alloc_len < req_len) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+                               "0291 Allocated DMA memory size (x%x) is "
+                               "less than the requested DMA memory "
+                               "size (x%x)\n", alloc_len, req_len);
+               lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               return -ENOMEM;
+       }
+
+       /* Get the first SGE entry from the non-embedded DMA memory. This
+        * routine only uses a single SGE.
+        */
+       lpfc_sli4_mbx_sge_get(mboxq, 0, &sge);
+       phys_addr = getPaddr(sge.pa_hi, sge.pa_lo);
+       if (unlikely(!mboxq->sge_array)) {
+               lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+                               "2527 Failed to get the non-embedded SGE "
+                               "virtual address\n");
+               lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               return -ENOMEM;
+       }
+       virt_addr = mboxq->sge_array->addr[0];
+       read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
+
+       /* Set up command fields */
+       bf_set(lpfc_mbx_read_fcf_tbl_indx, &read_fcf->u.request, fcf_index);
+       /* Perform necessary endian conversion */
+       bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
+       lpfc_sli_pcimem_bcopy(bytep, bytep, sizeof(uint32_t));
+       mboxq->vport = phba->pport;
+       mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+       rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+       if (rc == MBX_NOT_FINISHED) {
+               lpfc_sli4_mbox_cmd_free(phba, mboxq);
+               error = -EIO;
+       } else
+               error = 0;
+       return error;
+}
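
The in-place lpfc_sli_pcimem_bcopy call above is how the driver byte-swaps mailbox words where host and port byte order differ. A sketch of one 32-bit in-place swap (plain C, not the driver's helper, which also handles longer buffers):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Swap one 32-bit word in place; a stand-in for the driver's
 * word-swapping copy when source and destination are the same. */
static void swap32_inplace(void *p)
{
	uint32_t w;

	memcpy(&w, p, sizeof(w));
	w = (w >> 24) | ((w >> 8) & 0x0000FF00) |
	    ((w << 8) & 0x00FF0000) | (w << 24);
	memcpy(p, &w, sizeof(w));
}

int main(void)
{
	uint8_t buf[4] = { 0x12, 0x34, 0x56, 0x78 };

	swap32_inplace(buf);
	printf("%02X %02X %02X %02X\n", buf[0], buf[1], buf[2], buf[3]);
	/* prints: 78 56 34 12 */
	return 0;
}
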
index 883938652a6a4eabb2417cf68b69aa1718492888..7d37eb7459bf05ac4ebe326c2df466e2dfb69a1e 100644 (file)
@@ -29,13 +29,23 @@ typedef enum _lpfc_ctx_cmd {
        LPFC_CTX_HOST
 } lpfc_ctx_cmd;
 
+/* This structure is used to carry the needed response IOCB states */
+struct lpfc_sli4_rspiocb_info {
+       uint8_t hw_status;
+       uint8_t bfield;
+#define LPFC_XB        0x1
+#define LPFC_PV        0x2
+       uint8_t priority;
+       uint8_t reserved;
+};
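
The bfield member packs two single-bit flags into one byte. A minimal check of the mask arithmetic; reading XB as exchange-busy and PV as priority-valid is an assumption, not something this header states:

#include <stdint.h>
#include <stdio.h>

#define LPFC_XB 0x1	/* read here as exchange-busy (assumption) */
#define LPFC_PV 0x2	/* read here as priority-valid (assumption) */

int main(void)
{
	uint8_t bfield = LPFC_XB | LPFC_PV;

	printf("xb=%d pv=%d\n", !!(bfield & LPFC_XB), !!(bfield & LPFC_PV));
	return 0;
}
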
+
 /* This structure is used to handle IOCB requests / responses */
 struct lpfc_iocbq {
        /* lpfc_iocbqs are used in double linked lists */
        struct list_head list;
        struct list_head clist;
        uint16_t iotag;         /* pre-assigned IO tag */
-       uint16_t rsvd1;
+       uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
 
        IOCB_t iocb;            /* IOCB cmd */
        uint8_t retry;          /* retry counter for IOCB cmd - if needed */
@@ -65,7 +75,7 @@ struct lpfc_iocbq {
                           struct lpfc_iocbq *);
        void (*iocb_cmpl) (struct lpfc_hba *, struct lpfc_iocbq *,
                           struct lpfc_iocbq *);
-
+       struct lpfc_sli4_rspiocb_info sli4_info;
 };
 
 #define SLI_IOCB_RET_IOCB      1       /* Return IOCB if cmd ring full */
@@ -81,14 +91,18 @@ struct lpfc_iocbq {
 typedef struct lpfcMboxq {
        /* MBOXQs are used in single linked lists */
        struct list_head list;  /* ptr to next mailbox command */
-       MAILBOX_t mb;           /* Mailbox cmd */
-       struct lpfc_vport *vport;/* virutal port pointer */
+       union {
+               MAILBOX_t mb;           /* Mailbox cmd */
+               struct lpfc_mqe mqe;
+       } u;
+       struct lpfc_vport *vport;/* virtual port pointer */
        void *context1;         /* caller context information */
        void *context2;         /* caller context information */
 
        void (*mbox_cmpl) (struct lpfc_hba *, struct lpfcMboxq *);
        uint8_t mbox_flag;
-
+       struct lpfc_mcqe mcqe;
+       struct lpfc_mbx_nembed_sge_virt *sge_array;
 } LPFC_MBOXQ_t;
 
 #define MBX_POLL        1      /* poll mailbox till command done, then
@@ -230,10 +244,11 @@ struct lpfc_sli {
 
        /* Additional sli_flags */
 #define LPFC_SLI_MBOX_ACTIVE      0x100        /* HBA mailbox is currently active */
-#define LPFC_SLI2_ACTIVE          0x200        /* SLI2 overlay in firmware is active */
+#define LPFC_SLI_ACTIVE           0x200        /* SLI in firmware is active */
 #define LPFC_PROCESS_LA           0x400        /* Able to process link attention */
 #define LPFC_BLOCK_MGMT_IO        0x800        /* Don't allow mgmt mbx or iocb cmds */
#define LPFC_MENLO_MAINT          0x1000 /* need for menlo fw download */
+#define LPFC_SLI_ASYNC_MBX_BLK    0x2000 /* Async mailbox is blocked */
 
        struct lpfc_sli_ring ring[LPFC_MAX_RING];
        int fcp_ring;           /* ring used for FCP initiator commands */
@@ -261,6 +276,8 @@ struct lpfc_sli {
 
 #define LPFC_MBOX_TMO           30     /* Sec tmo for outstanding mbox
                                           command */
+#define LPFC_MBOX_SLI4_CONFIG_TMO 60   /* Sec tmo for outstanding mbox
+                                          command */
 #define LPFC_MBOX_TMO_FLASH_CMD 300     /* Sec tmo for outstanding FLASH write
                                         * or erase cmds. This is especially
                                         * long because of the potential of
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
new file mode 100644 (file)
index 0000000..5196b46
--- /dev/null
@@ -0,0 +1,467 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_RELEASE_NOTIFICATION_INTERVAL     32
+#define LPFC_GET_QE_REL_INT                    32
+#define LPFC_RPI_LOW_WATER_MARK                        10
+/* Number of SGL entries that can be posted in a 4KB nonembedded mbox command */
+#define LPFC_NEMBED_MBOX_SGL_CNT               254
+
+/* Multi-queue arrangement for fast-path FCP work queues */
+#define LPFC_FN_EQN_MAX       8
+#define LPFC_SP_EQN_DEF       1
+#define LPFC_FP_EQN_DEF       1
+#define LPFC_FP_EQN_MIN       1
+#define LPFC_FP_EQN_MAX       (LPFC_FN_EQN_MAX - LPFC_SP_EQN_DEF)
+
+#define LPFC_FN_WQN_MAX       32
+#define LPFC_SP_WQN_DEF       1
+#define LPFC_FP_WQN_DEF       4
+#define LPFC_FP_WQN_MIN       1
+#define LPFC_FP_WQN_MAX       (LPFC_FN_WQN_MAX - LPFC_SP_WQN_DEF)
+
+/*
+ * Provide the default FCF Record attributes used by the driver
+ * when nonFIP mode is configured and there are no other default
+ * FCF Record attributes.
+ */
+#define LPFC_FCOE_FCF_DEF_INDEX        0
+#define LPFC_FCOE_FCF_GET_FIRST        0xFFFF
+#define LPFC_FCOE_FCF_NEXT_NONE        0xFFFF
+
+/* First 3 bytes of default FCF MAC is specified by FC_MAP */
+#define LPFC_FCOE_FCF_MAC3     0xFF
+#define LPFC_FCOE_FCF_MAC4     0xFF
+#define LPFC_FCOE_FCF_MAC5     0xFE
+#define LPFC_FCOE_FCF_MAP0     0x0E
+#define LPFC_FCOE_FCF_MAP1     0xFC
+#define LPFC_FCOE_FCF_MAP2     0x00
+#define LPFC_FCOE_MAX_RCV_SIZE 0x5AC
+#define LPFC_FCOE_FKA_ADV_PER  0
+#define LPFC_FCOE_FIP_PRIORITY 0x80
+
+enum lpfc_sli4_queue_type {
+       LPFC_EQ,
+       LPFC_GCQ,
+       LPFC_MCQ,
+       LPFC_WCQ,
+       LPFC_RCQ,
+       LPFC_MQ,
+       LPFC_WQ,
+       LPFC_HRQ,
+       LPFC_DRQ
+};
+
+/* The queue sub-type defines the functional purpose of the queue */
+enum lpfc_sli4_queue_subtype {
+       LPFC_NONE,
+       LPFC_MBOX,
+       LPFC_FCP,
+       LPFC_ELS,
+       LPFC_USOL
+};
+
+union sli4_qe {
+       void *address;
+       struct lpfc_eqe *eqe;
+       struct lpfc_cqe *cqe;
+       struct lpfc_mcqe *mcqe;
+       struct lpfc_wcqe_complete *wcqe_complete;
+       struct lpfc_wcqe_release *wcqe_release;
+       struct sli4_wcqe_xri_aborted *wcqe_xri_aborted;
+       struct lpfc_rcqe_complete *rcqe_complete;
+       struct lpfc_mqe *mqe;
+       union  lpfc_wqe *wqe;
+       struct lpfc_rqe *rqe;
+};
+
+struct lpfc_queue {
+       struct list_head list;
+       enum lpfc_sli4_queue_type type;
+       enum lpfc_sli4_queue_subtype subtype;
+       struct lpfc_hba *phba;
+       struct list_head child_list;
+       uint32_t entry_count;   /* Number of entries to support on the queue */
+       uint32_t entry_size;    /* Size of each queue entry. */
+       uint32_t queue_id;      /* Queue ID assigned by the hardware */
+       struct list_head page_list;
+       uint32_t page_count;    /* Number of pages allocated for this queue */
+
+       uint32_t host_index;    /* The host's index for putting or getting */
+       uint32_t hba_index;     /* The last known hba index for get or put */
+       union sli4_qe qe[1];    /* array to index entries (must be last) */
+};
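
The trailing qe[1] member is the pre-C99 flexible-array idiom: the queue header and its entry index array come from a single allocation sized for entry_count slots. A userspace sketch of that sizing, with a hypothetical helper rather than the driver's allocator:

#include <stdio.h>
#include <stdlib.h>

struct demo_queue {
	unsigned entry_count;
	void *qe[1];	/* must be last, as in struct lpfc_queue */
};

/* One allocation covers the header plus (n - 1) extra qe slots. */
static struct demo_queue *demo_queue_alloc(unsigned n)
{
	struct demo_queue *q = calloc(1, sizeof(*q) +
					 (n - 1) * sizeof(q->qe[0]));

	if (q)
		q->entry_count = n;
	return q;
}

int main(void)
{
	struct demo_queue *q = demo_queue_alloc(256);

	printf("%u entries indexable via q->qe[i]\n", q ? q->entry_count : 0);
	free(q);
	return 0;
}
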
+
+struct lpfc_cq_event {
+       struct list_head list;
+       union {
+               struct lpfc_mcqe                mcqe_cmpl;
+               struct lpfc_acqe_link           acqe_link;
+               struct lpfc_acqe_fcoe           acqe_fcoe;
+               struct lpfc_acqe_dcbx           acqe_dcbx;
+               struct lpfc_rcqe                rcqe_cmpl;
+               struct sli4_wcqe_xri_aborted    wcqe_axri;
+       } cqe;
+};
+
+struct lpfc_sli4_link {
+       uint8_t speed;
+       uint8_t duplex;
+       uint8_t status;
+       uint8_t physical;
+       uint8_t fault;
+};
+
+struct lpfc_fcf {
+       uint8_t  fabric_name[8];
+       uint8_t  mac_addr[6];
+       uint16_t fcf_indx;
+       uint16_t fcfi;
+       uint32_t fcf_flag;
+#define FCF_AVAILABLE  0x01 /* FCF available for discovery */
+#define FCF_REGISTERED 0x02 /* FCF registered with FW */
+#define FCF_DISCOVERED 0x04 /* FCF discovery started  */
+#define FCF_BOOT_ENABLE 0x08 /* Boot bios use this FCF */
+#define FCF_IN_USE     0x10 /* At least one discovery completed */
+#define FCF_VALID_VLAN 0x20 /* Use the vlan id specified */
+       uint32_t priority;
+       uint32_t addr_mode;
+       uint16_t vlan_id;
+};
+
+#define LPFC_REGION23_SIGNATURE "RG23"
+#define LPFC_REGION23_VERSION  1
+#define LPFC_REGION23_LAST_REC  0xff
+struct lpfc_fip_param_hdr {
+       uint8_t type;
+#define FCOE_PARAM_TYPE                0xA0
+       uint8_t length;
+#define FCOE_PARAM_LENGTH      2
+       uint8_t parm_version;
+#define FIPP_VERSION           0x01
+       uint8_t parm_flags;
+#define        lpfc_fip_param_hdr_fipp_mode_SHIFT      6
+#define        lpfc_fip_param_hdr_fipp_mode_MASK       0x3
+#define lpfc_fip_param_hdr_fipp_mode_WORD      parm_flags
+#define        FIPP_MODE_ON                            0x2
+#define        FIPP_MODE_OFF                           0x0
+#define FIPP_VLAN_VALID                                0x1
+};
+
+struct lpfc_fcoe_params {
+       uint8_t fc_map[3];
+       uint8_t reserved1;
+       uint16_t vlan_tag;
+       uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_hdr {
+       uint8_t type;
+#define FCOE_CONN_TBL_TYPE             0xA1
+       uint8_t length;   /* words */
+       uint8_t reserved[2];
+};
+
+struct lpfc_fcf_conn_rec {
+       uint16_t flags;
+#define        FCFCNCT_VALID           0x0001
+#define        FCFCNCT_BOOT            0x0002
+#define        FCFCNCT_PRIMARY         0x0004   /* if not set, Secondary */
+#define        FCFCNCT_FBNM_VALID      0x0008
+#define        FCFCNCT_SWNM_VALID      0x0010
+#define        FCFCNCT_VLAN_VALID      0x0020
+#define        FCFCNCT_AM_VALID        0x0040
+#define        FCFCNCT_AM_PREFERRED    0x0080   /* if not set, AM Required */
+#define        FCFCNCT_AM_SPMA         0x0100   /* if not set, FPMA */
+
+       uint16_t vlan_tag;
+       uint8_t fabric_name[8];
+       uint8_t switch_name[8];
+};
+
+struct lpfc_fcf_conn_entry {
+       struct list_head list;
+       struct lpfc_fcf_conn_rec conn_rec;
+};
+
+/*
+ * Define the host's bootstrap mailbox.  This structure contains
+ * the member attributes needed to create, use, and destroy the
+ * bootstrap mailbox region.
+ *
+ * The macro definitions for the bmbx data structure are defined
+ * in lpfc_hw4.h with the register definition.
+ */
+struct lpfc_bmbx {
+       struct lpfc_dmabuf *dmabuf;
+       struct dma_address dma_address;
+       void *avirt;
+       dma_addr_t aphys;
+       uint32_t bmbx_size;
+};
+
+#define LPFC_EQE_SIZE LPFC_EQE_SIZE_4B
+
+#define LPFC_EQE_SIZE_4B       4
+#define LPFC_EQE_SIZE_16B      16
+#define LPFC_CQE_SIZE          16
+#define LPFC_WQE_SIZE          64
+#define LPFC_MQE_SIZE          256
+#define LPFC_RQE_SIZE          8
+
+#define LPFC_EQE_DEF_COUNT     1024
+#define LPFC_CQE_DEF_COUNT      256
+#define LPFC_WQE_DEF_COUNT      64
+#define LPFC_MQE_DEF_COUNT      16
+#define LPFC_RQE_DEF_COUNT     512
+
+#define LPFC_QUEUE_NOARM       false
+#define LPFC_QUEUE_REARM       true
+
+
+/*
+ * SLI4 CT field defines
+ */
+#define SLI4_CT_RPI 0
+#define SLI4_CT_VPI 1
+#define SLI4_CT_VFI 2
+#define SLI4_CT_FCFI 3
+
+#define LPFC_SLI4_MAX_SEGMENT_SIZE 0x10000
+
+/*
+ * SLI4 specific data structures
+ */
+struct lpfc_max_cfg_param {
+       uint16_t max_xri;
+       uint16_t xri_base;
+       uint16_t xri_used;
+       uint16_t max_rpi;
+       uint16_t rpi_base;
+       uint16_t rpi_used;
+       uint16_t max_vpi;
+       uint16_t vpi_base;
+       uint16_t vpi_used;
+       uint16_t max_vfi;
+       uint16_t vfi_base;
+       uint16_t vfi_used;
+       uint16_t max_fcfi;
+       uint16_t fcfi_base;
+       uint16_t fcfi_used;
+       uint16_t max_eq;
+       uint16_t max_rq;
+       uint16_t max_cq;
+       uint16_t max_wq;
+};
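+
+/*
+ * Every resource above follows the same max/base/used convention; e.g.
+ * the VPIs still available on a port can be computed as (illustrative,
+ * phba assumed to be a valid struct lpfc_hba):
+ *
+ *	avail = phba->sli4_hba.max_cfg_param.max_vpi -
+ *		phba->sli4_hba.max_cfg_param.vpi_used;
+ */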
+
+struct lpfc_hba;
+/* SLI4 HBA multi-fcp queue handler struct */
+struct lpfc_fcp_eq_hdl {
+       uint32_t idx;
+       struct lpfc_hba *phba;
+};
+
+/* SLI4 HBA data structure entries */
+struct lpfc_sli4_hba {
+       void __iomem *conf_regs_memmap_p; /* Kernel memory mapped address for
+                                            PCI BAR0, config space registers */
+       void __iomem *ctrl_regs_memmap_p; /* Kernel memory mapped address for
+                                            PCI BAR1, control registers */
+       void __iomem *drbl_regs_memmap_p; /* Kernel memory mapped address for
+                                            PCI BAR2, doorbell registers */
+       /* BAR0 PCI config space register memory map */
+       void __iomem *UERRLOregaddr; /* Address to UERR_STATUS_LO register */
+       void __iomem *UERRHIregaddr; /* Address to UERR_STATUS_HI register */
+       void __iomem *ONLINE0regaddr; /* Address to components of internal UE */
+       void __iomem *ONLINE1regaddr; /* Address to components of internal UE */
+#define LPFC_ONLINE_NERR       0xFFFFFFFF
+       void __iomem *SCRATCHPADregaddr; /* Address to scratchpad register */
+       /* BAR1 FCoE function CSR register memory map */
+       void __iomem *STAregaddr;    /* Address to HST_STATE register */
+       void __iomem *ISRregaddr;    /* Address to HST_ISR register */
+       void __iomem *IMRregaddr;    /* Address to HST_IMR register */
+       void __iomem *ISCRregaddr;   /* Address to HST_ISCR register */
+       /* BAR2 VF-0 doorbell register memory map */
+       void __iomem *RQDBregaddr;   /* Address to RQ_DOORBELL register */
+       void __iomem *WQDBregaddr;   /* Address to WQ_DOORBELL register */
+       void __iomem *EQCQDBregaddr; /* Address to EQCQ_DOORBELL register */
+       void __iomem *MQDBregaddr;   /* Address to MQ_DOORBELL register */
+       void __iomem *BMBXregaddr;   /* Address to BootStrap MBX register */
+
+       struct msix_entry *msix_entries;
+       uint32_t cfg_eqn;
+       struct lpfc_fcp_eq_hdl *fcp_eq_hdl; /* FCP per-WQ handle */
+       /* Pointers to the constructed SLI4 queues */
+       struct lpfc_queue **fp_eq; /* Fast-path event queue */
+       struct lpfc_queue *sp_eq;  /* Slow-path event queue */
+       struct lpfc_queue **fcp_wq;/* Fast-path FCP work queue */
+       struct lpfc_queue *mbx_wq; /* Slow-path MBOX work queue */
+       struct lpfc_queue *els_wq; /* Slow-path ELS work queue */
+       struct lpfc_queue *hdr_rq; /* Slow-path Header Receive queue */
+       struct lpfc_queue *dat_rq; /* Slow-path Data Receive queue */
+       struct lpfc_queue **fcp_cq;/* Fast-path FCP compl queue */
+       struct lpfc_queue *mbx_cq; /* Slow-path mailbox complete queue */
+       struct lpfc_queue *els_cq; /* Slow-path ELS response complete queue */
+       struct lpfc_queue *rxq_cq; /* Slow-path unsolicited complete queue */
+
+       /* Setup information for various queue parameters */
+       int eq_esize;
+       int eq_ecount;
+       int cq_esize;
+       int cq_ecount;
+       int wq_esize;
+       int wq_ecount;
+       int mq_esize;
+       int mq_ecount;
+       int rq_esize;
+       int rq_ecount;
+#define LPFC_SP_EQ_MAX_INTR_SEC         10000
+#define LPFC_FP_EQ_MAX_INTR_SEC         10000
+
+       uint32_t intr_enable;
+       struct lpfc_bmbx bmbx;
+       struct lpfc_max_cfg_param max_cfg_param;
+       uint16_t next_xri; /* last_xri - max_cfg_param.xri_base = used */
+       uint16_t next_rpi;
+       uint16_t scsi_xri_max;
+       uint16_t scsi_xri_cnt;
+       struct list_head lpfc_free_sgl_list;
+       struct list_head lpfc_sgl_list;
+       struct lpfc_sglq **lpfc_els_sgl_array;
+       struct list_head lpfc_abts_els_sgl_list;
+       struct lpfc_scsi_buf **lpfc_scsi_psb_array;
+       struct list_head lpfc_abts_scsi_buf_list;
+       uint32_t total_sglq_bufs;
+       struct lpfc_sglq **lpfc_sglq_active_list;
+       struct list_head lpfc_rpi_hdr_list;
+       unsigned long *rpi_bmask;
+       uint16_t rpi_count;
+       struct lpfc_sli4_flags sli4_flags;
+       struct list_head sp_rspiocb_work_queue;
+       struct list_head sp_cqe_event_pool;
+       struct list_head sp_asynce_work_queue;
+       struct list_head sp_fcp_xri_aborted_work_queue;
+       struct list_head sp_els_xri_aborted_work_queue;
+       struct list_head sp_unsol_work_queue;
+       struct lpfc_sli4_link link_state;
+       spinlock_t abts_scsi_buf_list_lock; /* list of aborted SCSI IOs */
+       spinlock_t abts_sgl_list_lock; /* list of aborted els IOs */
+};
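+
+/*
+ * Queue topology, as reflected in the comments above: each fast-path
+ * EQ fp_eq[i] services the FCP completion queue fcp_cq[i] that pairs
+ * with work queue fcp_wq[i]; the single slow-path EQ sp_eq services
+ * the mailbox (mbx_cq), ELS (els_cq) and unsolicited-receive (rxq_cq)
+ * completion queues.
+ */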
+
+enum lpfc_sge_type {
+       GEN_BUFF_TYPE,
+       SCSI_BUFF_TYPE
+};
+
+struct lpfc_sglq {
+       /* lpfc_sglqs are used in double linked lists */
+       struct list_head list;
+       struct list_head clist;
+       enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+       uint16_t iotag;         /* pre-assigned IO tag */
+       uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
+       struct sli4_sge *sgl;   /* pre-assigned SGL */
+       void *virt;             /* virtual address. */
+       dma_addr_t phys;        /* physical address */
+};
+
+struct lpfc_rpi_hdr {
+       struct list_head list;
+       uint32_t len;
+       struct lpfc_dmabuf *dmabuf;
+       uint32_t page_count;
+       uint32_t start_rpi;
+};
+
+/*
+ * SLI4 specific function prototypes
+ */
+int lpfc_pci_function_reset(struct lpfc_hba *);
+int lpfc_sli4_hba_setup(struct lpfc_hba *);
+int lpfc_sli4_hba_down(struct lpfc_hba *);
+int lpfc_sli4_config(struct lpfc_hba *, struct lpfcMboxq *, uint8_t,
+                    uint8_t, uint32_t, bool);
+void lpfc_sli4_mbox_cmd_free(struct lpfc_hba *, struct lpfcMboxq *);
+void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
+void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
+                          struct lpfc_mbx_sge *);
+
+void lpfc_sli4_hba_reset(struct lpfc_hba *);
+struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
+                       uint32_t);
+void lpfc_sli4_queue_free(struct lpfc_queue *);
+uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint16_t);
+uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+                       struct lpfc_queue *, uint32_t, uint32_t);
+uint32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
+                       struct lpfc_queue *, uint32_t);
+uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+                       struct lpfc_queue *, uint32_t);
+uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+                       struct lpfc_queue *, struct lpfc_queue *, uint32_t);
+uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+                        struct lpfc_queue *);
+int lpfc_sli4_queue_setup(struct lpfc_hba *);
+void lpfc_sli4_queue_unset(struct lpfc_hba *);
+int lpfc_sli4_post_sgl(struct lpfc_hba *, dma_addr_t, dma_addr_t, uint16_t);
+int lpfc_sli4_repost_scsi_sgl_list(struct lpfc_hba *);
+int lpfc_sli4_remove_all_sgl_pages(struct lpfc_hba *);
+uint16_t lpfc_sli4_next_xritag(struct lpfc_hba *);
+int lpfc_sli4_post_async_mbox(struct lpfc_hba *);
+int lpfc_sli4_post_sgl_list(struct lpfc_hba *phba);
+int lpfc_sli4_post_scsi_sgl_block(struct lpfc_hba *, struct list_head *, int);
+struct lpfc_cq_event *__lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+struct lpfc_cq_event *lpfc_sli4_cq_event_alloc(struct lpfc_hba *);
+void __lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+void lpfc_sli4_cq_event_release(struct lpfc_hba *, struct lpfc_cq_event *);
+int lpfc_sli4_init_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_post_rpi_hdr(struct lpfc_hba *, struct lpfc_rpi_hdr *);
+int lpfc_sli4_post_all_rpi_hdrs(struct lpfc_hba *);
+struct lpfc_rpi_hdr *lpfc_sli4_create_rpi_hdr(struct lpfc_hba *);
+void lpfc_sli4_remove_rpi_hdrs(struct lpfc_hba *);
+int lpfc_sli4_alloc_rpi(struct lpfc_hba *);
+void lpfc_sli4_free_rpi(struct lpfc_hba *, int);
+void lpfc_sli4_remove_rpis(struct lpfc_hba *);
+void lpfc_sli4_async_event_proc(struct lpfc_hba *);
+int lpfc_sli4_resume_rpi(struct lpfc_nodelist *);
+void lpfc_sli4_fcp_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_els_xri_abort_event_proc(struct lpfc_hba *);
+void lpfc_sli4_fcp_xri_aborted(struct lpfc_hba *,
+                              struct sli4_wcqe_xri_aborted *);
+void lpfc_sli4_els_xri_aborted(struct lpfc_hba *,
+                              struct sli4_wcqe_xri_aborted *);
+int lpfc_sli4_brdreset(struct lpfc_hba *);
+int lpfc_sli4_add_fcf_record(struct lpfc_hba *, struct fcf_record *);
+void lpfc_sli_remove_dflt_fcf(struct lpfc_hba *);
+int lpfc_sli4_get_els_iocb_cnt(struct lpfc_hba *);
+int lpfc_sli4_init_vpi(struct lpfc_hba *, uint16_t);
+uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
+uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
+void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_post_status_check(struct lpfc_hba *);
+uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
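+
+/*
+ * Illustrative create sequence for a fast-path event queue, using the
+ * prototypes above (error handling elided; max_intr is a hypothetical
+ * interrupt coalescing value):
+ *
+ *	struct lpfc_queue *eq;
+ *
+ *	eq = lpfc_sli4_queue_alloc(phba, phba->sli4_hba.eq_esize,
+ *				   phba->sli4_hba.eq_ecount);
+ *	if (!eq || lpfc_eq_create(phba, eq, max_intr))
+ *		handle the failure;
+ */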
+
index e599519e3078c214be69fd3ad415cc6648b653c2..6b8a148f0a55ca7f001dc87586972d9ac6b0ce65 100644 (file)
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.1"
+#define LPFC_DRIVER_VERSION "8.3.2"
 
 #define LPFC_DRIVER_NAME               "lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME    "lpfc:sp"
index 917ad56b0aff33c358e9632630142c75003f53e9..a6313ee84ac50356f289f02d28cc0c1788ba47d5 100644 (file)
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
 #include <scsi/scsi_transport_fc.h>
+#include "lpfc_hw4.h"
 #include "lpfc_hw.h"
 #include "lpfc_sli.h"
+#include "lpfc_sli4.h"
 #include "lpfc_nl.h"
 #include "lpfc_disc.h"
 #include "lpfc_scsi.h"
@@ -89,6 +91,8 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
                vpi = 0;
        else
                set_bit(vpi, phba->vpi_bmask);
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               phba->sli4_hba.max_cfg_param.vpi_used++;
        spin_unlock_irq(&phba->hbalock);
        return vpi;
 }
@@ -96,8 +100,12 @@ lpfc_alloc_vpi(struct lpfc_hba *phba)
 static void
 lpfc_free_vpi(struct lpfc_hba *phba, int vpi)
 {
+       if (vpi == 0)
+               return;
        spin_lock_irq(&phba->hbalock);
        clear_bit(vpi, phba->vpi_bmask);
+       if (phba->sli_rev == LPFC_SLI_REV4)
+               phba->sli4_hba.max_cfg_param.vpi_used--;
        spin_unlock_irq(&phba->hbalock);
 }
 
@@ -113,7 +121,7 @@ lpfc_vport_sparm(struct lpfc_hba *phba, struct lpfc_vport *vport)
        if (!pmb) {
                return -ENOMEM;
        }
-       mb = &pmb->mb;
+       mb = &pmb->u.mb;
 
        lpfc_read_sparam(phba, pmb, vport->vpi);
        /*
@@ -243,23 +251,22 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
                    (vport->fc_flag & wait_flags)  ||
                    ((vport->port_state > LPFC_VPORT_FAILED) &&
                     (vport->port_state < LPFC_VPORT_READY))) {
-                       lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                        "1833 Vport discovery quiesce Wait:"
-                                       " vpi x%x state x%x fc_flags x%x"
+                                       " state x%x fc_flags x%x"
                                        " num_nodes x%x, waiting 1000 msecs"
                                        " total wait msecs x%x\n",
-                                       vport->vpi, vport->port_state,
-                                       vport->fc_flag, vport->num_disc_nodes,
+                                       vport->port_state, vport->fc_flag,
+                                       vport->num_disc_nodes,
                                        jiffies_to_msecs(jiffies - start_time));
                        msleep(1000);
                } else {
                        /* Base case.  Wait variants satisfied.  Break out */
-                       lpfc_printf_log(phba, KERN_INFO, LOG_VPORT,
+                       lpfc_printf_vlog(vport, KERN_INFO, LOG_VPORT,
                                         "1834 Vport discovery quiesced:"
-                                        " vpi x%x state x%x fc_flags x%x"
+                                        " state x%x fc_flags x%x"
                                         " wait msecs x%x\n",
-                                        vport->vpi, vport->port_state,
-                                        vport->fc_flag,
+                                        vport->port_state, vport->fc_flag,
                                         jiffies_to_msecs(jiffies
                                                - start_time));
                        break;
@@ -267,12 +274,10 @@ static void lpfc_discovery_wait(struct lpfc_vport *vport)
        }
 
        if (time_after(jiffies, wait_time_max))
-               lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
                                "1835 Vport discovery quiesce failed:"
-                               " vpi x%x state x%x fc_flags x%x"
-                               " wait msecs x%x\n",
-                               vport->vpi, vport->port_state,
-                               vport->fc_flag,
+                               " state x%x fc_flags x%x wait msecs x%x\n",
+                               vport->port_state, vport->fc_flag,
                                jiffies_to_msecs(jiffies - start_time));
 }
 
@@ -308,6 +313,21 @@ lpfc_vport_create(struct fc_vport *fc_vport, bool disable)
                goto error_out;
        }
 
+       /*
+        * In SLI4, the vpi must be activated before it can be used
+        * by the port.
+        */
+       if (phba->sli_rev == LPFC_SLI_REV4) {
+               rc = lpfc_sli4_init_vpi(phba, vpi);
+               if (rc) {
+                       lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+                                       "1838 Failed to INIT_VPI on vpi %d "
+                                       "status %d\n", vpi, rc);
+                       rc = VPORT_NORESOURCES;
+                       lpfc_free_vpi(phba, vpi);
+                       goto error_out;
+               }
+       }
 
        /* Assign an unused board number */
        if ((instance = lpfc_get_instance()) < 0) {
@@ -535,6 +555,16 @@ lpfc_vport_delete(struct fc_vport *fc_vport)
                                 "physical host\n");
                return VPORT_ERROR;
        }
+
+       /* If the vport is a static vport fail the deletion. */
+       if ((vport->vport_flag & STATIC_VPORT) &&
+               !(phba->pport->load_flag & FC_UNLOADING)) {
+               lpfc_printf_vlog(vport, KERN_ERR, LOG_VPORT,
+                                "1837 vport_delete failed: Cannot delete "
+                                "static vport.\n");
+               return VPORT_ERROR;
+       }
+
        /*
         * If we are not unloading the driver then prevent the vport_delete
         * from happening until after this vport's discovery is finished.
@@ -710,7 +740,7 @@ lpfc_create_vport_work_array(struct lpfc_hba *phba)
        struct lpfc_vport *port_iterator;
        struct lpfc_vport **vports;
        int index = 0;
-       vports = kzalloc((phba->max_vpi + 1) * sizeof(struct lpfc_vport *),
+       vports = kzalloc((phba->max_vports + 1) * sizeof(struct lpfc_vport *),
                         GFP_KERNEL);
        if (vports == NULL)
                return NULL;
@@ -734,7 +764,7 @@ lpfc_destroy_vport_work_array(struct lpfc_hba *phba, struct lpfc_vport **vports)
        int i;
        if (vports == NULL)
                return;
-       for (i=0; vports[i] != NULL && i <= phba->max_vpi; i++)
+       for (i = 0; vports[i] != NULL && i <= phba->max_vports; i++)
                scsi_host_put(lpfc_shost_from_vport(vports[i]));
        kfree(vports);
 }
index 36b1d1052ba1281eb6e33d8fe154d7fce87aacf1..286c185fa9e40a5b7347de0dc99b06c4d2f0e617 100644 (file)
@@ -61,6 +61,7 @@
 #include <scsi/scsi_tcq.h>
 #include <scsi/scsi_transport_sas.h>
 #include <scsi/scsi_dbg.h>
+#include <scsi/scsi_eh.h>
 
 #include "mpt2sas_debug.h"
 
 #define MPT2SAS_DRIVER_NAME            "mpt2sas"
 #define MPT2SAS_AUTHOR "LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION    "LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION         "01.100.02.00"
+#define MPT2SAS_DRIVER_VERSION         "01.100.03.00"
 #define MPT2SAS_MAJOR_VERSION          01
 #define MPT2SAS_MINOR_VERSION          100
-#define MPT2SAS_BUILD_VERSION          02
+#define MPT2SAS_BUILD_VERSION          03
 #define MPT2SAS_RELEASE_VERSION                00
 
 /*
index ba6ab170bdf02ca5fc75e2b8a6b22bce943b23c1..14e473d1fa7b18193e0673d86dec3bfc8737aa84 100644 (file)
@@ -473,7 +473,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
 }
 
 /**
- * _ctl_do_task_abort - assign an active smid to the abort_task
+ * _ctl_set_task_mid - assign an active smid to the tm request
  * @ioc: per adapter object
  * @karg - (struct mpt2_ioctl_command)
  * @tm_request - pointer to mf from user space
@@ -482,7 +482,7 @@ _ctl_poll(struct file *filep, poll_table *wait)
  * during failure, the reply frame is filled.
  */
 static int
-_ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
+_ctl_set_task_mid(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
     Mpi2SCSITaskManagementRequest_t *tm_request)
 {
        u8 found = 0;
@@ -494,6 +494,14 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
        Mpi2SCSITaskManagementReply_t *tm_reply;
        u32 sz;
        u32 lun;
+       char *desc = NULL;
+
+       if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+               desc = "abort_task";
+       else if (tm_request->TaskType == MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK)
+               desc = "query_task";
+       else
+               return 0;
 
        lun = scsilun_to_int((struct scsi_lun *)tm_request->LUN);
 
@@ -517,13 +525,13 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
        spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
 
        if (!found) {
-               dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
-                   "DevHandle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
-                   tm_request->DevHandle, lun));
+               dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+                   "handle(0x%04x), lun(%d), no active mid!!\n", ioc->name,
+                   desc, tm_request->DevHandle, lun));
                tm_reply = ioc->ctl_cmds.reply;
                tm_reply->DevHandle = tm_request->DevHandle;
                tm_reply->Function = MPI2_FUNCTION_SCSI_TASK_MGMT;
-               tm_reply->TaskType = MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK;
+               tm_reply->TaskType = tm_request->TaskType;
                tm_reply->MsgLength = sizeof(Mpi2SCSITaskManagementReply_t)/4;
                tm_reply->VP_ID = tm_request->VP_ID;
                tm_reply->VF_ID = tm_request->VF_ID;
@@ -535,9 +543,9 @@ _ctl_do_task_abort(struct MPT2SAS_ADAPTER *ioc, struct mpt2_ioctl_command *karg,
                return 1;
        }
 
-       dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "ABORT_TASK: "
-           "DevHandle(0x%04x), lun(%d), smid(%d)\n", ioc->name,
-           tm_request->DevHandle, lun, tm_request->TaskMID));
+       dctlprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+           "handle(0x%04x), lun(%d), task_mid(%d)\n", ioc->name,
+           desc, tm_request->DevHandle, lun, tm_request->TaskMID));
        return 0;
 }
 
@@ -739,8 +747,10 @@ _ctl_do_mpt_command(struct MPT2SAS_ADAPTER *ioc,
                    (Mpi2SCSITaskManagementRequest_t *)mpi_request;
 
                if (tm_request->TaskType ==
-                   MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
-                       if (_ctl_do_task_abort(ioc, &karg, tm_request)) {
+                   MPI2_SCSITASKMGMT_TASKTYPE_ABORT_TASK ||
+                   tm_request->TaskType ==
+                   MPI2_SCSITASKMGMT_TASKTYPE_QUERY_TASK) {
+                       if (_ctl_set_task_mid(ioc, &karg, tm_request)) {
                                mpt2sas_base_free_smid(ioc, smid);
                                goto out;
                        }
index e3a7967259e75fe5cfac5f5101a4e8f2912992f6..2a01a5f2a84dddc4d105d5ef88030abe1adc21ab 100644 (file)
@@ -197,12 +197,12 @@ static struct pci_device_id scsih_pci_table[] = {
 MODULE_DEVICE_TABLE(pci, scsih_pci_table);
 
 /**
- * scsih_set_debug_level - global setting of ioc->logging_level.
+ * _scsih_set_debug_level - global setting of ioc->logging_level.
  *
  * Note: The logging levels are defined in mpt2sas_debug.h.
  */
 static int
-scsih_set_debug_level(const char *val, struct kernel_param *kp)
+_scsih_set_debug_level(const char *val, struct kernel_param *kp)
 {
        int ret = param_set_int(val, kp);
        struct MPT2SAS_ADAPTER *ioc;
@@ -215,7 +215,7 @@ scsih_set_debug_level(const char *val, struct kernel_param *kp)
                ioc->logging_level = logging_level;
        return 0;
 }
-module_param_call(logging_level, scsih_set_debug_level, param_get_int,
+module_param_call(logging_level, _scsih_set_debug_level, param_get_int,
     &logging_level, 0644);
 
 /**
@@ -883,6 +883,41 @@ _scsih_scsi_lookup_find_by_target(struct MPT2SAS_ADAPTER *ioc, int id,
        return found;
 }
 
+/**
+ * _scsih_scsi_lookup_find_by_lun - search for matching channel:id:lun
+ * @ioc: per adapter object
+ * @id: target id
+ * @lun: lun number
+ * @channel: channel
+ * Context: This function will acquire ioc->scsi_lookup_lock.
+ *
+ * This will search for a matching channel:id:lun in the scsi_lookup array,
+ * returning 1 if found.
+ */
+static u8
+_scsih_scsi_lookup_find_by_lun(struct MPT2SAS_ADAPTER *ioc, int id,
+    unsigned int lun, int channel)
+{
+       u8 found;
+       unsigned long   flags;
+       int i;
+
+       spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
+       found = 0;
+       for (i = 0 ; i < ioc->request_depth; i++) {
+               if (ioc->scsi_lookup[i].scmd &&
+                   (ioc->scsi_lookup[i].scmd->device->id == id &&
+                   ioc->scsi_lookup[i].scmd->device->channel == channel &&
+                   ioc->scsi_lookup[i].scmd->device->lun == lun)) {
+                       found = 1;
+                       goto out;
+               }
+       }
+ out:
+       spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
+       return found;
+}
+
 /**
  * _scsih_get_chain_buffer_dma - obtain block of chains (dma address)
  * @ioc: per adapter object
@@ -1047,14 +1082,14 @@ _scsih_build_scatter_gather(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * scsih_change_queue_depth - setting device queue depth
+ * _scsih_change_queue_depth - setting device queue depth
  * @sdev: scsi device struct
  * @qdepth: requested queue depth
  *
  * Returns queue depth.
  */
 static int
-scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
+_scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 {
        struct Scsi_Host *shost = sdev->host;
        int max_depth;
@@ -1079,14 +1114,14 @@ scsih_change_queue_depth(struct scsi_device *sdev, int qdepth)
 }
 
 /**
- * scsih_change_queue_depth - changing device queue tag type
+ * _scsih_change_queue_depth - changing device queue tag type
  * @sdev: scsi device struct
  * @tag_type: requested tag type
  *
  * Returns queue tag type.
  */
 static int
-scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
+_scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
 {
        if (sdev->tagged_supported) {
                scsi_set_tag_type(sdev, tag_type);
@@ -1101,14 +1136,14 @@ scsih_change_queue_type(struct scsi_device *sdev, int tag_type)
 }
 
 /**
- * scsih_target_alloc - target add routine
+ * _scsih_target_alloc - target add routine
  * @starget: scsi target struct
  *
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
 static int
-scsih_target_alloc(struct scsi_target *starget)
+_scsih_target_alloc(struct scsi_target *starget)
 {
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1163,13 +1198,13 @@ scsih_target_alloc(struct scsi_target *starget)
 }
 
 /**
- * scsih_target_destroy - target destroy routine
+ * _scsih_target_destroy - target destroy routine
  * @starget: scsi target struct
  *
  * Returns nothing.
  */
 static void
-scsih_target_destroy(struct scsi_target *starget)
+_scsih_target_destroy(struct scsi_target *starget)
 {
        struct Scsi_Host *shost = dev_to_shost(&starget->dev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1212,14 +1247,14 @@ scsih_target_destroy(struct scsi_target *starget)
 }
 
 /**
- * scsih_slave_alloc - device add routine
+ * _scsih_slave_alloc - device add routine
  * @sdev: scsi device struct
  *
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
 static int
-scsih_slave_alloc(struct scsi_device *sdev)
+_scsih_slave_alloc(struct scsi_device *sdev)
 {
        struct Scsi_Host *shost;
        struct MPT2SAS_ADAPTER *ioc;
@@ -1273,13 +1308,13 @@ scsih_slave_alloc(struct scsi_device *sdev)
 }
 
 /**
- * scsih_slave_destroy - device destroy routine
+ * _scsih_slave_destroy - device destroy routine
  * @sdev: scsi device struct
  *
  * Returns nothing.
  */
 static void
-scsih_slave_destroy(struct scsi_device *sdev)
+_scsih_slave_destroy(struct scsi_device *sdev)
 {
        struct MPT2SAS_TARGET *sas_target_priv_data;
        struct scsi_target *starget;
@@ -1295,13 +1330,13 @@ scsih_slave_destroy(struct scsi_device *sdev)
 }
 
 /**
- * scsih_display_sata_capabilities - sata capabilities
+ * _scsih_display_sata_capabilities - sata capabilities
  * @ioc: per adapter object
  * @sas_device: the sas_device object
  * @sdev: scsi device struct
  */
 static void
-scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
+_scsih_display_sata_capabilities(struct MPT2SAS_ADAPTER *ioc,
     struct _sas_device *sas_device, struct scsi_device *sdev)
 {
        Mpi2ConfigReply_t mpi_reply;
@@ -1401,14 +1436,14 @@ _scsih_get_volume_capabilities(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * scsih_slave_configure - device configure routine.
+ * _scsih_slave_configure - device configure routine.
  * @sdev: scsi device struct
  *
  * Returns 0 if ok. Any other return is assumed to be an error and
  * the device is ignored.
  */
 static int
-scsih_slave_configure(struct scsi_device *sdev)
+_scsih_slave_configure(struct scsi_device *sdev)
 {
        struct Scsi_Host *shost = sdev->host;
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1489,7 +1524,7 @@ scsih_slave_configure(struct scsi_device *sdev)
                    r_level, raid_device->handle,
                    (unsigned long long)raid_device->wwid,
                    raid_device->num_pds, ds);
-               scsih_change_queue_depth(sdev, qdepth);
+               _scsih_change_queue_depth(sdev, qdepth);
                return 0;
        }
 
@@ -1532,10 +1567,10 @@ scsih_slave_configure(struct scsi_device *sdev)
                    sas_device->slot);
 
                if (!ssp_target)
-                       scsih_display_sata_capabilities(ioc, sas_device, sdev);
+                       _scsih_display_sata_capabilities(ioc, sas_device, sdev);
        }
 
-       scsih_change_queue_depth(sdev, qdepth);
+       _scsih_change_queue_depth(sdev, qdepth);
 
        if (ssp_target)
                sas_read_port_mode_page(sdev);
@@ -1543,7 +1578,7 @@ scsih_slave_configure(struct scsi_device *sdev)
 }
 
 /**
- * scsih_bios_param - fetch head, sector, cylinder info for a disk
+ * _scsih_bios_param - fetch head, sector, cylinder info for a disk
  * @sdev: scsi device struct
  * @bdev: pointer to block device context
  * @capacity: device size (in 512 byte sectors)
@@ -1555,7 +1590,7 @@ scsih_slave_configure(struct scsi_device *sdev)
  * Return nothing.
  */
 static int
-scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
+_scsih_bios_param(struct scsi_device *sdev, struct block_device *bdev,
     sector_t capacity, int params[])
 {
        int             heads;
@@ -1636,7 +1671,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
 }
 
 /**
- * scsih_tm_done - tm completion routine
+ * _scsih_tm_done - tm completion routine
  * @ioc: per adapter object
  * @smid: system request message index
  * @VF_ID: virtual function id
@@ -1648,7 +1683,7 @@ _scsih_response_code(struct MPT2SAS_ADAPTER *ioc, u8 response_code)
  * Return nothing.
  */
 static void
-scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
+_scsih_tm_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
 {
        MPI2DefaultReply_t *mpi_reply;
 
@@ -1823,13 +1858,13 @@ mpt2sas_scsih_issue_tm(struct MPT2SAS_ADAPTER *ioc, u16 handle, uint lun,
 }
 
 /**
- * scsih_abort - eh threads main abort routine
+ * _scsih_abort - eh threads main abort routine
  * @sdev: scsi device struct
  *
  * Returns SUCCESS if command aborted else FAILED
  */
 static int
-scsih_abort(struct scsi_cmnd *scmd)
+_scsih_abort(struct scsi_cmnd *scmd)
 {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
        struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1889,15 +1924,86 @@ scsih_abort(struct scsi_cmnd *scmd)
        return r;
 }
 
+/**
+ * _scsih_dev_reset - eh threads main device reset routine
+ * @scmd: pointer to scsi command object
+ *
+ * Returns SUCCESS if the device was reset, else FAILED
+ */
+static int
+_scsih_dev_reset(struct scsi_cmnd *scmd)
+{
+       struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
+       struct MPT2SAS_DEVICE *sas_device_priv_data;
+       struct _sas_device *sas_device;
+       unsigned long flags;
+       u16     handle;
+       int r;
+
+       printk(MPT2SAS_INFO_FMT "attempting device reset! scmd(%p)\n",
+           ioc->name, scmd);
+       scsi_print_command(scmd);
+
+       sas_device_priv_data = scmd->device->hostdata;
+       if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
+               printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
+                   ioc->name, scmd);
+               scmd->result = DID_NO_CONNECT << 16;
+               scmd->scsi_done(scmd);
+               r = SUCCESS;
+               goto out;
+       }
+
+       /* for hidden raid components obtain the volume_handle */
+       handle = 0;
+       if (sas_device_priv_data->sas_target->flags &
+           MPT_TARGET_FLAGS_RAID_COMPONENT) {
+               spin_lock_irqsave(&ioc->sas_device_lock, flags);
+               sas_device = _scsih_sas_device_find_by_handle(ioc,
+                  sas_device_priv_data->sas_target->handle);
+               if (sas_device)
+                       handle = sas_device->volume_handle;
+               spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
+       } else
+               handle = sas_device_priv_data->sas_target->handle;
+
+       if (!handle) {
+               scmd->result = DID_RESET << 16;
+               r = FAILED;
+               goto out;
+       }
+
+       mutex_lock(&ioc->tm_cmds.mutex);
+       mpt2sas_scsih_issue_tm(ioc, handle, 0,
+           MPI2_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET, scmd->device->lun,
+           30);
+
+       /*
+        *  sanity check: see whether all commands to this device have
+        *  been completed
+        */
+       if (_scsih_scsi_lookup_find_by_lun(ioc, scmd->device->id,
+           scmd->device->lun, scmd->device->channel))
+               r = FAILED;
+       else
+               r = SUCCESS;
+       ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
+       mutex_unlock(&ioc->tm_cmds.mutex);
+
+ out:
+       printk(MPT2SAS_INFO_FMT "device reset: %s scmd(%p)\n",
+           ioc->name, ((r == SUCCESS) ? "SUCCESS" : "FAILED"), scmd);
+       return r;
+}
 
 /**
- * scsih_dev_reset - eh threads main device reset routine
+ * _scsih_target_reset - eh threads main target reset routine
  * @sdev: scsi device struct
  *
  * Returns SUCCESS if command aborted else FAILED
  */
 static int
-scsih_dev_reset(struct scsi_cmnd *scmd)
+_scsih_target_reset(struct scsi_cmnd *scmd)
 {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
        struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -1912,7 +2018,7 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 
        sas_device_priv_data = scmd->device->hostdata;
        if (!sas_device_priv_data || !sas_device_priv_data->sas_target) {
-               printk(MPT2SAS_INFO_FMT "device been deleted! scmd(%p)\n",
+               printk(MPT2SAS_INFO_FMT "target been deleted! scmd(%p)\n",
                    ioc->name, scmd);
                scmd->result = DID_NO_CONNECT << 16;
                scmd->scsi_done(scmd);
@@ -1962,13 +2068,13 @@ scsih_dev_reset(struct scsi_cmnd *scmd)
 }
 
 /**
- * scsih_abort - eh threads main host reset routine
+ * _scsih_host_reset - eh threads main host reset routine
  * @sdev: scsi device struct
  *
  * Returns SUCCESS if command aborted else FAILED
  */
 static int
-scsih_host_reset(struct scsi_cmnd *scmd)
+_scsih_host_reset(struct scsi_cmnd *scmd)
 {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
        int r, retval;
@@ -2390,7 +2496,107 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
 }
 
 /**
- * scsih_qcmd - main scsi request entry point
+ * _scsih_setup_eedp - setup MPI request for EEDP transfer
+ * @scmd: pointer to scsi command object
+ * @mpi_request: pointer to the SCSI_IO request message frame
+ *
+ * Supporting protection 1 and 3.
+ *
+ * Returns nothing
+ */
+static void
+_scsih_setup_eedp(struct scsi_cmnd *scmd, Mpi2SCSIIORequest_t *mpi_request)
+{
+       u16 eedp_flags;
+       unsigned char prot_op = scsi_get_prot_op(scmd);
+       unsigned char prot_type = scsi_get_prot_type(scmd);
+
+       if (prot_type == SCSI_PROT_DIF_TYPE0 ||
+          prot_type == SCSI_PROT_DIF_TYPE2 ||
+          prot_op == SCSI_PROT_NORMAL)
+               return;
+
+       if (prot_op ==  SCSI_PROT_READ_STRIP)
+               eedp_flags = MPI2_SCSIIO_EEDPFLAGS_CHECK_REMOVE_OP;
+       else if (prot_op ==  SCSI_PROT_WRITE_INSERT)
+               eedp_flags = MPI2_SCSIIO_EEDPFLAGS_INSERT_OP;
+       else
+               return;
+
+       mpi_request->EEDPBlockSize = scmd->device->sector_size;
+
+       switch (prot_type) {
+       case SCSI_PROT_DIF_TYPE1:
+
+               /*
+                * enable ref/guard checking
+                * auto increment ref tag
+                */
+               mpi_request->EEDPFlags = eedp_flags |
+                   MPI2_SCSIIO_EEDPFLAGS_INC_PRI_REFTAG |
+                   MPI2_SCSIIO_EEDPFLAGS_CHECK_REFTAG |
+                   MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+               mpi_request->CDB.EEDP32.PrimaryReferenceTag =
+                   cpu_to_be32(scsi_get_lba(scmd));
+
+               break;
+
+       case SCSI_PROT_DIF_TYPE3:
+
+               /*
+                * enable guard checking
+                */
+               mpi_request->EEDPFlags = eedp_flags |
+                   MPI2_SCSIIO_EEDPFLAGS_CHECK_GUARD;
+
+               break;
+       }
+}
+
+/**
+ * _scsih_eedp_error_handling - return sense code for EEDP errors
+ * @scmd: pointer to scsi command object
+ * @ioc_status: ioc status
+ *
+ * Returns nothing
+ */
+static void
+_scsih_eedp_error_handling(struct scsi_cmnd *scmd, u16 ioc_status)
+{
+       u8 ascq;
+       u8 sk;
+       u8 host_byte;
+
+       switch (ioc_status) {
+       case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+               ascq = 0x01;
+               break;
+       case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+               ascq = 0x02;
+               break;
+       case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+               ascq = 0x03;
+               break;
+       default:
+               ascq = 0x00;
+               break;
+       }
+
+       if (scmd->sc_data_direction == DMA_TO_DEVICE) {
+               sk = ILLEGAL_REQUEST;
+               host_byte = DID_ABORT;
+       } else {
+               sk = ABORTED_COMMAND;
+               host_byte = DID_OK;
+       }
+
+       scsi_build_sense_buffer(0, scmd->sense_buffer, sk, 0x10, ascq);
+       scmd->result = DRIVER_SENSE << 24 | (host_byte << 16) |
+           SAM_STAT_CHECK_CONDITION;
+}
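+
+/*
+ * Net effect of the mapping above: guard, app-tag and ref-tag failures
+ * are reported with ASC 0x10 and ASCQ 0x01, 0x02 and 0x03 respectively;
+ * failed writes surface as ILLEGAL_REQUEST with DID_ABORT, failed reads
+ * as ABORTED_COMMAND with DID_OK, both as a check condition.
+ */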
+
+/**
+ * _scsih_qcmd - main scsi request entry point
  * @scmd: pointer to scsi command object
  * @done: function pointer to be invoked on completion
  *
@@ -2401,7 +2607,7 @@ mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
  * SCSI_MLQUEUE_HOST_BUSY if the entire host queue is full
  */
 static int
-scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
+_scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
 {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(scmd->device->host);
        struct MPT2SAS_DEVICE *sas_device_priv_data;
@@ -2470,6 +2676,7 @@ scsih_qcmd(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
        }
        mpi_request = mpt2sas_base_get_msg_frame(ioc, smid);
        memset(mpi_request, 0, sizeof(Mpi2SCSIIORequest_t));
+       _scsih_setup_eedp(scmd, mpi_request);
        mpi_request->Function = MPI2_FUNCTION_SCSI_IO_REQUEST;
        if (sas_device_priv_data->sas_target->flags &
            MPT_TARGET_FLAGS_RAID_COMPONENT)
@@ -2604,6 +2811,15 @@ _scsih_scsi_ioc_info(struct MPT2SAS_ADAPTER *ioc, struct scsi_cmnd *scmd,
        case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
                desc_ioc_state = "scsi ext terminated";
                break;
+       case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+               desc_ioc_state = "eedp guard error";
+               break;
+       case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+               desc_ioc_state = "eedp ref tag error";
+               break;
+       case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+               desc_ioc_state = "eedp app tag error";
+               break;
        default:
                desc_ioc_state = "unknown";
                break;
@@ -2783,7 +2999,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 }
 
 /**
- * scsih_io_done - scsi request callback
+ * _scsih_io_done - scsi request callback
  * @ioc: per adapter object
  * @smid: system request message index
  * @VF_ID: virtual function id
@@ -2794,7 +3010,7 @@ _scsih_smart_predicted_fault(struct MPT2SAS_ADAPTER *ioc, u16 handle)
  * Return nothing.
  */
 static void
-scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
+_scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
 {
        Mpi2SCSIIORequest_t *mpi_request;
        Mpi2SCSIIOReply_t *mpi_reply;
@@ -2939,6 +3155,11 @@ scsih_io_done(struct MPT2SAS_ADAPTER *ioc, u16 smid, u8 VF_ID, u32 reply)
                        scmd->result = DID_RESET << 16;
                break;
 
+       case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
+       case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
+       case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
+               _scsih_eedp_error_handling(scmd, ioc_status);
+               break;
        case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
        case MPI2_IOCSTATUS_INVALID_FUNCTION:
        case MPI2_IOCSTATUS_INVALID_SGL:
@@ -5130,18 +5351,19 @@ static struct scsi_host_template scsih_driver_template = {
        .module                         = THIS_MODULE,
        .name                           = "Fusion MPT SAS Host",
        .proc_name                      = MPT2SAS_DRIVER_NAME,
-       .queuecommand                   = scsih_qcmd,
-       .target_alloc                   = scsih_target_alloc,
-       .slave_alloc                    = scsih_slave_alloc,
-       .slave_configure                = scsih_slave_configure,
-       .target_destroy                 = scsih_target_destroy,
-       .slave_destroy                  = scsih_slave_destroy,
-       .change_queue_depth             = scsih_change_queue_depth,
-       .change_queue_type              = scsih_change_queue_type,
-       .eh_abort_handler               = scsih_abort,
-       .eh_device_reset_handler        = scsih_dev_reset,
-       .eh_host_reset_handler          = scsih_host_reset,
-       .bios_param                     = scsih_bios_param,
+       .queuecommand                   = _scsih_qcmd,
+       .target_alloc                   = _scsih_target_alloc,
+       .slave_alloc                    = _scsih_slave_alloc,
+       .slave_configure                = _scsih_slave_configure,
+       .target_destroy                 = _scsih_target_destroy,
+       .slave_destroy                  = _scsih_slave_destroy,
+       .change_queue_depth             = _scsih_change_queue_depth,
+       .change_queue_type              = _scsih_change_queue_type,
+       .eh_abort_handler               = _scsih_abort,
+       .eh_device_reset_handler        = _scsih_dev_reset,
+       .eh_target_reset_handler        = _scsih_target_reset,
+       .eh_host_reset_handler          = _scsih_host_reset,
+       .bios_param                     = _scsih_bios_param,
        .can_queue                      = 1,
        .this_id                        = -1,
        .sg_tablesize                   = MPT2SAS_SG_DEPTH,
@@ -5228,13 +5450,13 @@ _scsih_expander_node_remove(struct MPT2SAS_ADAPTER *ioc,
 }
 
 /**
- * scsih_remove - detach and remove add host
+ * _scsih_remove - detach and remove add host
  * @pdev: PCI device struct
  *
  * Return nothing.
  */
 static void __devexit
-scsih_remove(struct pci_dev *pdev)
+_scsih_remove(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5442,14 +5664,14 @@ _scsih_probe_devices(struct MPT2SAS_ADAPTER *ioc)
 }
 
 /**
- * scsih_probe - attach and add scsi host
+ * _scsih_probe - attach and add scsi host
  * @pdev: PCI device struct
  * @id: pci device id
  *
  * Returns 0 success, anything else error.
  */
 static int
-scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+_scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
        struct MPT2SAS_ADAPTER *ioc;
        struct Scsi_Host *shost;
@@ -5503,6 +5725,9 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                goto out_add_shost_fail;
        }
 
+       scsi_host_set_prot(shost, SHOST_DIF_TYPE1_PROTECTION
+           | SHOST_DIF_TYPE3_PROTECTION);
+
        /* event thread */
        snprintf(ioc->firmware_event_name, sizeof(ioc->firmware_event_name),
            "fw_event%d", ioc->id);
@@ -5536,14 +5761,14 @@ scsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 
 #ifdef CONFIG_PM
 /**
- * scsih_suspend - power management suspend main entry point
+ * _scsih_suspend - power management suspend main entry point
  * @pdev: PCI device struct
  * @state: PM state change to (usually PCI_D3)
  *
  * Returns 0 success, anything else error.
  */
 static int
-scsih_suspend(struct pci_dev *pdev, pm_message_t state)
+_scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5564,13 +5789,13 @@ scsih_suspend(struct pci_dev *pdev, pm_message_t state)
 }
 
 /**
- * scsih_resume - power management resume main entry point
+ * _scsih_resume - power management resume main entry point
  * @pdev: PCI device struct
  *
  * Returns 0 success, anything else error.
  */
 static int
-scsih_resume(struct pci_dev *pdev)
+_scsih_resume(struct pci_dev *pdev)
 {
        struct Scsi_Host *shost = pci_get_drvdata(pdev);
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -5599,22 +5824,22 @@ scsih_resume(struct pci_dev *pdev)
 static struct pci_driver scsih_driver = {
        .name           = MPT2SAS_DRIVER_NAME,
        .id_table       = scsih_pci_table,
-       .probe          = scsih_probe,
-       .remove         = __devexit_p(scsih_remove),
+       .probe          = _scsih_probe,
+       .remove         = __devexit_p(_scsih_remove),
 #ifdef CONFIG_PM
-       .suspend        = scsih_suspend,
-       .resume         = scsih_resume,
+       .suspend        = _scsih_suspend,
+       .resume         = _scsih_resume,
 #endif
 };
 
 
 /**
- * scsih_init - main entry point for this driver.
+ * _scsih_init - main entry point for this driver.
  *
  * Returns 0 success, anything else error.
  */
 static int __init
-scsih_init(void)
+_scsih_init(void)
 {
        int error;
 
@@ -5630,10 +5855,10 @@ scsih_init(void)
        mpt2sas_base_initialize_callback_handler();
 
         /* queuecommand callback handler */
-       scsi_io_cb_idx = mpt2sas_base_register_callback_handler(scsih_io_done);
+       scsi_io_cb_idx = mpt2sas_base_register_callback_handler(_scsih_io_done);
 
       /* task management callback handler */
-       tm_cb_idx = mpt2sas_base_register_callback_handler(scsih_tm_done);
+       tm_cb_idx = mpt2sas_base_register_callback_handler(_scsih_tm_done);
 
        /* base internal commands callback handler */
        base_cb_idx = mpt2sas_base_register_callback_handler(mpt2sas_base_done);
@@ -5659,12 +5884,12 @@ scsih_init(void)
 }
 
 /**
- * scsih_exit - exit point for this driver (when it is a module).
+ * _scsih_exit - exit point for this driver (when it is a module).
  *
  * Returns 0 success, anything else error.
  */
 static void __exit
-scsih_exit(void)
+_scsih_exit(void)
 {
        printk(KERN_INFO "mpt2sas version %s unloading\n",
            MPT2SAS_DRIVER_VERSION);
@@ -5682,5 +5907,5 @@ scsih_exit(void)
        mpt2sas_ctl_exit();
 }
 
-module_init(scsih_init);
-module_exit(scsih_exit);
+module_init(_scsih_init);
+module_exit(_scsih_exit);
index 5c65da519e39af511334816c18977912a4920637..686695b155c7af9899864c5d3103e3cfd49624b8 100644 (file)
@@ -264,7 +264,7 @@ struct rep_manu_reply{
 };
 
 /**
- * transport_expander_report_manufacture - obtain SMP report_manufacture
+ * _transport_expander_report_manufacture - obtain SMP report_manufacture
  * @ioc: per adapter object
  * @sas_address: expander sas address
  * @edev: the sas_expander_device object
@@ -274,7 +274,7 @@ struct rep_manu_reply{
  * Returns 0 for success, non-zero for failure.
  */
 static int
-transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
+_transport_expander_report_manufacture(struct MPT2SAS_ADAPTER *ioc,
     u64 sas_address, struct sas_expander_device *edev)
 {
        Mpi2SmpPassthroughRequest_t *mpi_request;
@@ -578,7 +578,7 @@ mpt2sas_transport_port_add(struct MPT2SAS_ADAPTER *ioc, u16 handle,
            MPI2_SAS_DEVICE_INFO_EDGE_EXPANDER ||
            mpt2sas_port->remote_identify.device_type ==
            MPI2_SAS_DEVICE_INFO_FANOUT_EXPANDER)
-               transport_expander_report_manufacture(ioc,
+               _transport_expander_report_manufacture(ioc,
                    mpt2sas_port->remote_identify.sas_address,
                    rphy_to_expander_device(rphy));
 
@@ -852,7 +852,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
 }
 
 /**
- * transport_get_linkerrors -
+ * _transport_get_linkerrors -
  * @phy: The sas phy object
  *
  * Only support sas_host direct attached phys.
@@ -860,7 +860,7 @@ rphy_to_ioc(struct sas_rphy *rphy)
  *
  */
 static int
-transport_get_linkerrors(struct sas_phy *phy)
+_transport_get_linkerrors(struct sas_phy *phy)
 {
        struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
        struct _sas_phy *mpt2sas_phy;
@@ -903,14 +903,14 @@ transport_get_linkerrors(struct sas_phy *phy)
 }
 
 /**
- * transport_get_enclosure_identifier -
+ * _transport_get_enclosure_identifier -
  * @phy: The sas phy object
  *
  * Obtain the enclosure logical id for an expander.
  * Returns 0 for success, non-zero for failure.
  */
 static int
-transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
+_transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 {
        struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
        struct _sas_node *sas_expander;
@@ -929,13 +929,13 @@ transport_get_enclosure_identifier(struct sas_rphy *rphy, u64 *identifier)
 }
 
 /**
- * transport_get_bay_identifier -
+ * _transport_get_bay_identifier -
  * @phy: The sas phy object
  *
  * Returns the slot id for a device that resides inside an enclosure.
  */
 static int
-transport_get_bay_identifier(struct sas_rphy *rphy)
+_transport_get_bay_identifier(struct sas_rphy *rphy)
 {
        struct MPT2SAS_ADAPTER *ioc = rphy_to_ioc(rphy);
        struct _sas_device *sas_device;
@@ -953,7 +953,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
 }
 
 /**
- * transport_phy_reset -
+ * _transport_phy_reset -
  * @phy: The sas phy object
  * @hard_reset:
  *
@@ -961,7 +961,7 @@ transport_get_bay_identifier(struct sas_rphy *rphy)
  * Returns 0 for success, non-zero for failure.
  */
 static int
-transport_phy_reset(struct sas_phy *phy, int hard_reset)
+_transport_phy_reset(struct sas_phy *phy, int hard_reset)
 {
        struct MPT2SAS_ADAPTER *ioc = phy_to_ioc(phy);
        struct _sas_phy *mpt2sas_phy;
@@ -1002,7 +1002,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
 }
 
 /**
- * transport_smp_handler - transport portal for smp passthru
+ * _transport_smp_handler - transport portal for smp passthru
  * @shost: shost object
  * @rphy: sas transport rphy object
  * @req:
@@ -1012,7 +1012,7 @@ transport_phy_reset(struct sas_phy *phy, int hard_reset)
  *           smp_rep_general /sys/class/bsg/expander-5:0
  */
 static int
-transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
+_transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
     struct request *req)
 {
        struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
@@ -1200,11 +1200,11 @@ transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
 }
 
 struct sas_function_template mpt2sas_transport_functions = {
-       .get_linkerrors         = transport_get_linkerrors,
-       .get_enclosure_identifier = transport_get_enclosure_identifier,
-       .get_bay_identifier     = transport_get_bay_identifier,
-       .phy_reset              = transport_phy_reset,
-       .smp_handler            = transport_smp_handler,
+       .get_linkerrors         = _transport_get_linkerrors,
+       .get_enclosure_identifier = _transport_get_enclosure_identifier,
+       .get_bay_identifier     = _transport_get_bay_identifier,
+       .phy_reset              = _transport_phy_reset,
+       .smp_handler            = _transport_smp_handler,
 };
 
 struct scsi_transport_template *mpt2sas_transport_template;
diff --git a/drivers/scsi/mvsas.c b/drivers/scsi/mvsas.c
deleted file mode 100644 (file)
index e4acebd..0000000
+++ /dev/null
@@ -1,3222 +0,0 @@
-/*
-       mvsas.c - Marvell 88SE6440 SAS/SATA support
-
-       Copyright 2007 Red Hat, Inc.
-       Copyright 2008 Marvell. <kewei@marvell.com>
-
-       This program is free software; you can redistribute it and/or
-       modify it under the terms of the GNU General Public License as
-       published by the Free Software Foundation; either version 2,
-       or (at your option) any later version.
-
-       This program is distributed in the hope that it will be useful,
-       but WITHOUT ANY WARRANTY; without even the implied warranty
-       of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
-       See the GNU General Public License for more details.
-
-       You should have received a copy of the GNU General Public
-       License along with this program; see the file COPYING.  If not,
-       write to the Free Software Foundation, 675 Mass Ave, Cambridge,
-       MA 02139, USA.
-
-       ---------------------------------------------------------------
-
-       Random notes:
-       * hardware supports controlling the endian-ness of data
-         structures.  this permits elimination of all the le32_to_cpu()
-         and cpu_to_le32() conversions.
-
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/ctype.h>
-#include <scsi/libsas.h>
-#include <scsi/scsi_tcq.h>
-#include <scsi/sas_ata.h>
-#include <asm/io.h>
-
-#define DRV_NAME       "mvsas"
-#define DRV_VERSION    "0.5.2"
-#define _MV_DUMP       0
-#define MVS_DISABLE_NVRAM
-#define MVS_DISABLE_MSI
-
-#define mr32(reg)      readl(regs + MVS_##reg)
-#define mw32(reg,val)  writel((val), regs + MVS_##reg)
-#define mw32_f(reg,val)        do {                    \
-       writel((val), regs + MVS_##reg);        \
-       readl(regs + MVS_##reg);                \
-       } while (0)
-
-#define MVS_ID_NOT_MAPPED      0x7f
-#define MVS_CHIP_SLOT_SZ       (1U << mvi->chip->slot_width)
-
-/* offset for D2H FIS in the Received FIS List Structure */
-#define SATA_RECEIVED_D2H_FIS(reg_set) \
-       ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x40)
-#define SATA_RECEIVED_PIO_FIS(reg_set) \
-       ((void *) mvi->rx_fis + 0x400 + 0x100 * reg_set + 0x20)
-#define UNASSOC_D2H_FIS(id)            \
-       ((void *) mvi->rx_fis + 0x100 * id)
-
-#define for_each_phy(__lseq_mask, __mc, __lseq, __rest)                        \
-       for ((__mc) = (__lseq_mask), (__lseq) = 0;                      \
-                                       (__mc) != 0 && __rest;          \
-                                       (++__lseq), (__mc) >>= 1)
-
-/* driver compile-time configuration */
-enum driver_configuration {
-       MVS_TX_RING_SZ          = 1024, /* TX ring size (12-bit) */
-       MVS_RX_RING_SZ          = 1024, /* RX ring size (12-bit) */
-                                       /* software requires power-of-2
-                                          ring size */
-
-       MVS_SLOTS               = 512,  /* command slots */
-       MVS_SLOT_BUF_SZ         = 8192, /* cmd tbl + IU + status + PRD */
-       MVS_SSP_CMD_SZ          = 64,   /* SSP command table buffer size */
-       MVS_ATA_CMD_SZ          = 96,   /* SATA command table buffer size */
-       MVS_OAF_SZ              = 64,   /* Open address frame buffer size */
-
-       MVS_RX_FIS_COUNT        = 17,   /* Optional rx'd FISs (max 17) */
-
-       MVS_QUEUE_SIZE          = 30,   /* Support Queue depth */
-       MVS_CAN_QUEUE           = MVS_SLOTS - 1,        /* SCSI Queue depth */
-};
-
-/* unchangeable hardware details */
-enum hardware_details {
-       MVS_MAX_PHYS            = 8,    /* max. possible phys */
-       MVS_MAX_PORTS           = 8,    /* max. possible ports */
-       MVS_RX_FISL_SZ          = 0x400 + (MVS_RX_FIS_COUNT * 0x100),
-};
-
-/* peripheral registers (BAR2) */
-enum peripheral_registers {
-       SPI_CTL                 = 0x10, /* EEPROM control */
-       SPI_CMD                 = 0x14, /* EEPROM command */
-       SPI_DATA                = 0x18, /* EEPROM data */
-};
-
-enum peripheral_register_bits {
-       TWSI_RDY                = (1U << 7),    /* EEPROM interface ready */
-       TWSI_RD                 = (1U << 4),    /* EEPROM read access */
-
-       SPI_ADDR_MASK           = 0x3ffff,      /* bits 17:0 */
-};
-
-/* enhanced mode registers (BAR4) */
-enum hw_registers {
-       MVS_GBL_CTL             = 0x04,  /* global control */
-       MVS_GBL_INT_STAT        = 0x08,  /* global irq status */
-       MVS_GBL_PI              = 0x0C,  /* ports implemented bitmask */
-       MVS_GBL_PORT_TYPE       = 0xa0,  /* port type */
-
-       MVS_CTL                 = 0x100, /* SAS/SATA port configuration */
-       MVS_PCS                 = 0x104, /* SAS/SATA port control/status */
-       MVS_CMD_LIST_LO         = 0x108, /* cmd list addr */
-       MVS_CMD_LIST_HI         = 0x10C,
-       MVS_RX_FIS_LO           = 0x110, /* RX FIS list addr */
-       MVS_RX_FIS_HI           = 0x114,
-
-       MVS_TX_CFG              = 0x120, /* TX configuration */
-       MVS_TX_LO               = 0x124, /* TX (delivery) ring addr */
-       MVS_TX_HI               = 0x128,
-
-       MVS_TX_PROD_IDX         = 0x12C, /* TX producer pointer */
-       MVS_TX_CONS_IDX         = 0x130, /* TX consumer pointer (RO) */
-       MVS_RX_CFG              = 0x134, /* RX configuration */
-       MVS_RX_LO               = 0x138, /* RX (completion) ring addr */
-       MVS_RX_HI               = 0x13C,
-       MVS_RX_CONS_IDX         = 0x140, /* RX consumer pointer (RO) */
-
-       MVS_INT_COAL            = 0x148, /* Int coalescing config */
-       MVS_INT_COAL_TMOUT      = 0x14C, /* Int coalescing timeout */
-       MVS_INT_STAT            = 0x150, /* Central int status */
-       MVS_INT_MASK            = 0x154, /* Central int enable */
-       MVS_INT_STAT_SRS        = 0x158, /* SATA register set status */
-       MVS_INT_MASK_SRS        = 0x15C,
-
-                                        /* ports 1-3 follow after this */
-       MVS_P0_INT_STAT         = 0x160, /* port0 interrupt status */
-       MVS_P0_INT_MASK         = 0x164, /* port0 interrupt mask */
-       MVS_P4_INT_STAT         = 0x200, /* Port 4 interrupt status */
-       MVS_P4_INT_MASK         = 0x204, /* Port 4 interrupt enable mask */
-
-                                        /* ports 1-3 follow after this */
-       MVS_P0_SER_CTLSTAT      = 0x180, /* port0 serial control/status */
-       MVS_P4_SER_CTLSTAT      = 0x220, /* port4 serial control/status */
-
-       MVS_CMD_ADDR            = 0x1B8, /* Command register port (addr) */
-       MVS_CMD_DATA            = 0x1BC, /* Command register port (data) */
-
-                                        /* ports 1-3 follow after this */
-       MVS_P0_CFG_ADDR         = 0x1C0, /* port0 phy register address */
-       MVS_P0_CFG_DATA         = 0x1C4, /* port0 phy register data */
-       MVS_P4_CFG_ADDR         = 0x230, /* Port 4 config address */
-       MVS_P4_CFG_DATA         = 0x234, /* Port 4 config data */
-
-                                        /* ports 1-3 follow after this */
-       MVS_P0_VSR_ADDR         = 0x1E0, /* port0 VSR address */
-       MVS_P0_VSR_DATA         = 0x1E4, /* port0 VSR data */
-       MVS_P4_VSR_ADDR         = 0x250, /* port 4 VSR addr */
-       MVS_P4_VSR_DATA         = 0x254, /* port 4 VSR data */
-};
-
-enum hw_register_bits {
-       /* MVS_GBL_CTL */
-       INT_EN                  = (1U << 1),    /* Global int enable */
-       HBA_RST                 = (1U << 0),    /* HBA reset */
-
-       /* MVS_GBL_INT_STAT */
-       INT_XOR                 = (1U << 4),    /* XOR engine event */
-       INT_SAS_SATA            = (1U << 0),    /* SAS/SATA event */
-
-       /* MVS_GBL_PORT_TYPE */                 /* shl for ports 1-3 */
-       SATA_TARGET             = (1U << 16),   /* port0 SATA target enable */
-       MODE_AUTO_DET_PORT7 = (1U << 15),       /* per-port SAS/SATA autodetect */
-       MODE_AUTO_DET_PORT6 = (1U << 14),
-       MODE_AUTO_DET_PORT5 = (1U << 13),
-       MODE_AUTO_DET_PORT4 = (1U << 12),
-       MODE_AUTO_DET_PORT3 = (1U << 11),
-       MODE_AUTO_DET_PORT2 = (1U << 10),
-       MODE_AUTO_DET_PORT1 = (1U << 9),
-       MODE_AUTO_DET_PORT0 = (1U << 8),
-       MODE_AUTO_DET_EN    =   MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
-                               MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
-                               MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
-                               MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
-       MODE_SAS_PORT7_MASK = (1U << 7),  /* per-port SAS(1), SATA(0) mode */
-       MODE_SAS_PORT6_MASK = (1U << 6),
-       MODE_SAS_PORT5_MASK = (1U << 5),
-       MODE_SAS_PORT4_MASK = (1U << 4),
-       MODE_SAS_PORT3_MASK = (1U << 3),
-       MODE_SAS_PORT2_MASK = (1U << 2),
-       MODE_SAS_PORT1_MASK = (1U << 1),
-       MODE_SAS_PORT0_MASK = (1U << 0),
-       MODE_SAS_SATA   =       MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
-                               MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
-                               MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
-                               MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
-
-                               /* SAS_MODE value may be
-                                * dictated (in hw) by values
-                                * of SATA_TARGET & AUTO_DET
-                                */
-
-       /* MVS_TX_CFG */
-       TX_EN                   = (1U << 16),   /* Enable TX */
-       TX_RING_SZ_MASK         = 0xfff,        /* TX ring size, bits 11:0 */
-
-       /* MVS_RX_CFG */
-       RX_EN                   = (1U << 16),   /* Enable RX */
-       RX_RING_SZ_MASK         = 0xfff,        /* RX ring size, bits 11:0 */
-
-       /* MVS_INT_COAL */
-       COAL_EN                 = (1U << 16),   /* Enable int coalescing */
-
-       /* MVS_INT_STAT, MVS_INT_MASK */
-       CINT_I2C                = (1U << 31),   /* I2C event */
-       CINT_SW0                = (1U << 30),   /* software event 0 */
-       CINT_SW1                = (1U << 29),   /* software event 1 */
-       CINT_PRD_BC             = (1U << 28),   /* PRD BC err for read cmd */
-       CINT_DMA_PCIE           = (1U << 27),   /* DMA to PCIE timeout */
-       CINT_MEM                = (1U << 26),   /* int mem parity err */
-       CINT_I2C_SLAVE          = (1U << 25),   /* slave I2C event */
-       CINT_SRS                = (1U << 3),    /* SRS event */
-       CINT_CI_STOP            = (1U << 1),    /* cmd issue stopped */
-       CINT_DONE               = (1U << 0),    /* cmd completion */
-
-                                               /* shl for ports 1-3 */
-       CINT_PORT_STOPPED       = (1U << 16),   /* port0 stopped */
-       CINT_PORT               = (1U << 8),    /* port0 event */
-       CINT_PORT_MASK_OFFSET   = 8,
-       CINT_PORT_MASK          = (0xFF << CINT_PORT_MASK_OFFSET),
-
-       /* TX (delivery) ring bits */
-       TXQ_CMD_SHIFT           = 29,
-       TXQ_CMD_SSP             = 1,            /* SSP protocol */
-       TXQ_CMD_SMP             = 2,            /* SMP protocol */
-       TXQ_CMD_STP             = 3,            /* STP/SATA protocol */
-       TXQ_CMD_SSP_FREE_LIST   = 4,            /* add to SSP targ free list */
-       TXQ_CMD_SLOT_RESET      = 7,            /* reset command slot */
-       TXQ_MODE_I              = (1U << 28),   /* mode: 0=target,1=initiator */
-       TXQ_PRIO_HI             = (1U << 27),   /* priority: 0=normal, 1=high */
-       TXQ_SRS_SHIFT           = 20,           /* SATA register set */
-       TXQ_SRS_MASK            = 0x7f,
-       TXQ_PHY_SHIFT           = 12,           /* PHY bitmap */
-       TXQ_PHY_MASK            = 0xff,
-       TXQ_SLOT_MASK           = 0xfff,        /* slot number */
-
-       /* RX (completion) ring bits */
-       RXQ_GOOD                = (1U << 23),   /* Response good */
-       RXQ_SLOT_RESET          = (1U << 21),   /* Slot reset complete */
-       RXQ_CMD_RX              = (1U << 20),   /* target cmd received */
-       RXQ_ATTN                = (1U << 19),   /* attention */
-       RXQ_RSP                 = (1U << 18),   /* response frame xfer'd */
-       RXQ_ERR                 = (1U << 17),   /* err info rec xfer'd */
-       RXQ_DONE                = (1U << 16),   /* cmd complete */
-       RXQ_SLOT_MASK           = 0xfff,        /* slot number */
-
-       /* mvs_cmd_hdr bits */
-       MCH_PRD_LEN_SHIFT       = 16,           /* 16-bit PRD table len */
-       MCH_SSP_FR_TYPE_SHIFT   = 13,           /* SSP frame type */
-
-                                               /* SSP initiator only */
-       MCH_SSP_FR_CMD          = 0x0,          /* COMMAND frame */
-
-                                               /* SSP initiator or target */
-       MCH_SSP_FR_TASK         = 0x1,          /* TASK frame */
-
-                                               /* SSP target only */
-       MCH_SSP_FR_XFER_RDY     = 0x4,          /* XFER_RDY frame */
-       MCH_SSP_FR_RESP         = 0x5,          /* RESPONSE frame */
-       MCH_SSP_FR_READ         = 0x6,          /* Read DATA frame(s) */
-       MCH_SSP_FR_READ_RESP    = 0x7,          /* ditto, plus RESPONSE */
-
-       MCH_PASSTHRU            = (1U << 12),   /* pass-through (SSP) */
-       MCH_FBURST              = (1U << 11),   /* first burst (SSP) */
-       MCH_CHK_LEN             = (1U << 10),   /* chk xfer len (SSP) */
-       MCH_RETRY               = (1U << 9),    /* tport layer retry (SSP) */
-       MCH_PROTECTION          = (1U << 8),    /* protection info rec (SSP) */
-       MCH_RESET               = (1U << 7),    /* Reset (STP/SATA) */
-       MCH_FPDMA               = (1U << 6),    /* First party DMA (STP/SATA) */
-       MCH_ATAPI               = (1U << 5),    /* ATAPI (STP/SATA) */
-       MCH_BIST                = (1U << 4),    /* BIST activate (STP/SATA) */
-       MCH_PMP_MASK            = 0xf,          /* PMP from cmd FIS (STP/SATA)*/
-
-       CCTL_RST                = (1U << 5),    /* port logic reset */
-
-                                               /* 0(LSB first), 1(MSB first) */
-       CCTL_ENDIAN_DATA        = (1U << 3),    /* PRD data */
-       CCTL_ENDIAN_RSP         = (1U << 2),    /* response frame */
-       CCTL_ENDIAN_OPEN        = (1U << 1),    /* open address frame */
-       CCTL_ENDIAN_CMD         = (1U << 0),    /* command table */
-
-       /* MVS_Px_SER_CTLSTAT (per-phy control) */
-       PHY_SSP_RST             = (1U << 3),    /* reset SSP link layer */
-       PHY_BCAST_CHG           = (1U << 2),    /* broadcast(change) notif */
-       PHY_RST_HARD            = (1U << 1),    /* hard reset + phy reset */
-       PHY_RST                 = (1U << 0),    /* phy reset */
-       PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
-       PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
-       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
-       PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
-                       (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
-       PHY_READY_MASK          = (1U << 20),
-
-       /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
-       PHYEV_DEC_ERR           = (1U << 24),   /* Phy Decoding Error */
-       PHYEV_UNASSOC_FIS       = (1U << 19),   /* unassociated FIS rx'd */
-       PHYEV_AN                = (1U << 18),   /* SATA async notification */
-       PHYEV_BIST_ACT          = (1U << 17),   /* BIST activate FIS */
-       PHYEV_SIG_FIS           = (1U << 16),   /* signature FIS */
-       PHYEV_POOF              = (1U << 12),   /* phy ready from 1 -> 0 */
-       PHYEV_IU_BIG            = (1U << 11),   /* IU too long err */
-       PHYEV_IU_SMALL          = (1U << 10),   /* IU too short err */
-       PHYEV_UNK_TAG           = (1U << 9),    /* unknown tag */
-       PHYEV_BROAD_CH          = (1U << 8),    /* broadcast(CHANGE) */
-       PHYEV_COMWAKE           = (1U << 7),    /* COMWAKE rx'd */
-       PHYEV_PORT_SEL          = (1U << 6),    /* port selector present */
-       PHYEV_HARD_RST          = (1U << 5),    /* hard reset rx'd */
-       PHYEV_ID_TMOUT          = (1U << 4),    /* identify timeout */
-       PHYEV_ID_FAIL           = (1U << 3),    /* identify failed */
-       PHYEV_ID_DONE           = (1U << 2),    /* identify done */
-       PHYEV_HARD_RST_DONE     = (1U << 1),    /* hard reset done */
-       PHYEV_RDY_CH            = (1U << 0),    /* phy ready changed state */
-
-       /* MVS_PCS */
-       PCS_EN_SATA_REG_SHIFT   = (16),         /* Enable SATA Register Set */
-       PCS_EN_PORT_XMT_SHIFT   = (12),         /* Enable Port Transmit */
-       PCS_EN_PORT_XMT_SHIFT2  = (8),          /* For 6480 */
-       PCS_SATA_RETRY          = (1U << 8),    /* retry ctl FIS on R_ERR */
-       PCS_RSP_RX_EN           = (1U << 7),    /* raw response rx */
-       PCS_SELF_CLEAR          = (1U << 5),    /* self-clearing int mode */
-       PCS_FIS_RX_EN           = (1U << 4),    /* FIS rx enable */
-       PCS_CMD_STOP_ERR        = (1U << 3),    /* cmd stop-on-err enable */
-       PCS_CMD_RST             = (1U << 1),    /* reset cmd issue */
-       PCS_CMD_EN              = (1U << 0),    /* enable cmd issue */
-
-       /* Port n Attached Device Info */
-       PORT_DEV_SSP_TRGT       = (1U << 19),
-       PORT_DEV_SMP_TRGT       = (1U << 18),
-       PORT_DEV_STP_TRGT       = (1U << 17),
-       PORT_DEV_SSP_INIT       = (1U << 11),
-       PORT_DEV_SMP_INIT       = (1U << 10),
-       PORT_DEV_STP_INIT       = (1U << 9),
-       PORT_PHY_ID_MASK        = (0xFFU << 24),
-       PORT_DEV_TRGT_MASK      = (0x7U << 17),
-       PORT_DEV_INIT_MASK      = (0x7U << 9),
-       PORT_DEV_TYPE_MASK      = (0x7U << 0),
-
-       /* Port n PHY Status */
-       PHY_RDY                 = (1U << 2),
-       PHY_DW_SYNC             = (1U << 1),
-       PHY_OOB_DTCTD           = (1U << 0),
-
-       /* VSR */
-       /* PHYMODE 6 (CDB) */
-       PHY_MODE6_LATECLK       = (1U << 29),   /* Lock Clock */
-       PHY_MODE6_DTL_SPEED     = (1U << 27),   /* Digital Loop Speed */
-       PHY_MODE6_FC_ORDER      = (1U << 26),   /* Fibre Channel Mode Order*/
-       PHY_MODE6_MUCNT_EN      = (1U << 24),   /* u Count Enable */
-       PHY_MODE6_SEL_MUCNT_LEN = (1U << 22),   /* Training Length Select */
-       PHY_MODE6_SELMUPI       = (1U << 20),   /* Phase Multi Select (init) */
-       PHY_MODE6_SELMUPF       = (1U << 18),   /* Phase Multi Select (final) */
-       PHY_MODE6_SELMUFF       = (1U << 16),   /* Freq Loop Multi Sel(final) */
-       PHY_MODE6_SELMUFI       = (1U << 14),   /* Freq Loop Multi Sel(init) */
-       PHY_MODE6_FREEZE_LOOP   = (1U << 12),   /* Freeze Rx CDR Loop */
-       PHY_MODE6_INT_RXFOFFS   = (1U << 3),    /* Rx CDR Freq Loop Enable */
-       PHY_MODE6_FRC_RXFOFFS   = (1U << 2),    /* Initial Rx CDR Offset */
-       PHY_MODE6_STAU_0D8      = (1U << 1),    /* Rx CDR Freq Loop Saturate */
-       PHY_MODE6_RXSAT_DIS     = (1U << 0),    /* Saturate Ctl */
-};
-
-enum mvs_info_flags {
-       MVF_MSI                 = (1U << 0),    /* MSI is enabled */
-       MVF_PHY_PWR_FIX         = (1U << 1),    /* bug workaround */
-};
-
-enum sas_cmd_port_registers {
-       CMD_CMRST_OOB_DET       = 0x100, /* COMRESET OOB detect register */
-       CMD_CMWK_OOB_DET        = 0x104, /* COMWAKE OOB detect register */
-       CMD_CMSAS_OOB_DET       = 0x108, /* COMSAS OOB detect register */
-       CMD_BRST_OOB_DET        = 0x10c, /* burst OOB detect register */
-       CMD_OOB_SPACE           = 0x110, /* OOB space control register */
-       CMD_OOB_BURST           = 0x114, /* OOB burst control register */
-       CMD_PHY_TIMER           = 0x118, /* PHY timer control register */
-       CMD_PHY_CONFIG0         = 0x11c, /* PHY config register 0 */
-       CMD_PHY_CONFIG1         = 0x120, /* PHY config register 1 */
-       CMD_SAS_CTL0            = 0x124, /* SAS control register 0 */
-       CMD_SAS_CTL1            = 0x128, /* SAS control register 1 */
-       CMD_SAS_CTL2            = 0x12c, /* SAS control register 2 */
-       CMD_SAS_CTL3            = 0x130, /* SAS control register 3 */
-       CMD_ID_TEST             = 0x134, /* ID test register */
-       CMD_PL_TIMER            = 0x138, /* PL timer register */
-       CMD_WD_TIMER            = 0x13c, /* WD timer register */
-       CMD_PORT_SEL_COUNT      = 0x140, /* port selector count register */
-       CMD_APP_MEM_CTL         = 0x144, /* Application Memory Control */
-       CMD_XOR_MEM_CTL         = 0x148, /* XOR Block Memory Control */
-       CMD_DMA_MEM_CTL         = 0x14c, /* DMA Block Memory Control */
-       CMD_PORT_MEM_CTL0       = 0x150, /* Port Memory Control 0 */
-       CMD_PORT_MEM_CTL1       = 0x154, /* Port Memory Control 1 */
-       CMD_SATA_PORT_MEM_CTL0  = 0x158, /* SATA Port Memory Control 0 */
-       CMD_SATA_PORT_MEM_CTL1  = 0x15c, /* SATA Port Memory Control 1 */
-       CMD_XOR_MEM_BIST_CTL    = 0x160, /* XOR Memory BIST Control */
-       CMD_XOR_MEM_BIST_STAT   = 0x164, /* XOR Memory BIST Status */
-       CMD_DMA_MEM_BIST_CTL    = 0x168, /* DMA Memory BIST Control */
-       CMD_DMA_MEM_BIST_STAT   = 0x16c, /* DMA Memory BIST Status */
-       CMD_PORT_MEM_BIST_CTL   = 0x170, /* Port Memory BIST Control */
-       CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
-       CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
-       CMD_STP_MEM_BIST_CTL    = 0x17c, /* STP Memory BIST Control */
-       CMD_STP_MEM_BIST_STAT0  = 0x180, /* STP Memory BIST Status 0 */
-       CMD_STP_MEM_BIST_STAT1  = 0x184, /* STP Memory BIST Status 1 */
-       CMD_RESET_COUNT         = 0x188, /* Reset Count */
-       CMD_MONTR_DATA_SEL      = 0x18C, /* Monitor Data/Select */
-       CMD_PLL_PHY_CONFIG      = 0x190, /* PLL/PHY Configuration */
-       CMD_PHY_CTL             = 0x194, /* PHY Control and Status */
-       CMD_PHY_TEST_COUNT0     = 0x198, /* Phy Test Count 0 */
-       CMD_PHY_TEST_COUNT1     = 0x19C, /* Phy Test Count 1 */
-       CMD_PHY_TEST_COUNT2     = 0x1A0, /* Phy Test Count 2 */
-       CMD_APP_ERR_CONFIG      = 0x1A4, /* Application Error Configuration */
-       CMD_PND_FIFO_CTL0       = 0x1A8, /* Pending FIFO Control 0 */
-       CMD_HOST_CTL            = 0x1AC, /* Host Control Status */
-       CMD_HOST_WR_DATA        = 0x1B0, /* Host Write Data */
-       CMD_HOST_RD_DATA        = 0x1B4, /* Host Read Data */
-       CMD_PHY_MODE_21         = 0x1B8, /* Phy Mode 21 */
-       CMD_SL_MODE0            = 0x1BC, /* SL Mode 0 */
-       CMD_SL_MODE1            = 0x1C0, /* SL Mode 1 */
-       CMD_PND_FIFO_CTL1       = 0x1C4, /* Pending FIFO Control 1 */
-};
-
-/* SAS/SATA configuration port registers, aka phy registers */
-enum sas_sata_config_port_regs {
-       PHYR_IDENTIFY           = 0x00, /* info for IDENTIFY frame */
-       PHYR_ADDR_LO            = 0x04, /* my SAS address (low) */
-       PHYR_ADDR_HI            = 0x08, /* my SAS address (high) */
-       PHYR_ATT_DEV_INFO       = 0x0C, /* attached device info */
-       PHYR_ATT_ADDR_LO        = 0x10, /* attached dev SAS addr (low) */
-       PHYR_ATT_ADDR_HI        = 0x14, /* attached dev SAS addr (high) */
-       PHYR_SATA_CTL           = 0x18, /* SATA control */
-       PHYR_PHY_STAT           = 0x1C, /* PHY status */
-       PHYR_SATA_SIG0          = 0x20, /* port SATA signature FIS (bytes 0-3) */
-       PHYR_SATA_SIG1          = 0x24, /* port SATA signature FIS (bytes 4-7) */
-       PHYR_SATA_SIG2          = 0x28, /* port SATA signature FIS (bytes 8-11) */
-       PHYR_SATA_SIG3          = 0x2c, /* port SATA signature FIS (bytes 12-15) */
-       PHYR_R_ERR_COUNT        = 0x30, /* port R_ERR count register */
-       PHYR_CRC_ERR_COUNT      = 0x34, /* port CRC error count register */
-       PHYR_WIDE_PORT          = 0x38, /* wide port participating */
-       PHYR_CURRENT0           = 0x80, /* current connection info 0 */
-       PHYR_CURRENT1           = 0x84, /* current connection info 1 */
-       PHYR_CURRENT2           = 0x88, /* current connection info 2 */
-};
-
-/*  SAS/SATA Vendor Specific Port Registers */
-enum sas_sata_vsp_regs {
-       VSR_PHY_STAT            = 0x00, /* Phy Status */
-       VSR_PHY_MODE1           = 0x01, /* phy tx */
-       VSR_PHY_MODE2           = 0x02, /* tx scc */
-       VSR_PHY_MODE3           = 0x03, /* pll */
-       VSR_PHY_MODE4           = 0x04, /* VCO */
-       VSR_PHY_MODE5           = 0x05, /* Rx */
-       VSR_PHY_MODE6           = 0x06, /* CDR */
-       VSR_PHY_MODE7           = 0x07, /* Impedance */
-       VSR_PHY_MODE8           = 0x08, /* Voltage */
-       VSR_PHY_MODE9           = 0x09, /* Test */
-       VSR_PHY_MODE10          = 0x0A, /* Power */
-       VSR_PHY_MODE11          = 0x0B, /* Phy Mode */
-       VSR_PHY_VS0             = 0x0C, /* Vendor Specific 0 */
-       VSR_PHY_VS1             = 0x0D, /* Vendor Specific 1 */
-};
-
-enum pci_cfg_registers {
-       PCR_PHY_CTL     = 0x40,
-       PCR_PHY_CTL2    = 0x90,
-       PCR_DEV_CTRL    = 0xE8,
-};
-
-enum pci_cfg_register_bits {
-       PCTL_PWR_ON     = (0xFU << 24),
-       PCTL_OFF        = (0xFU << 12),
-       PRD_REQ_SIZE    = (0x4000),
-       PRD_REQ_MASK    = (0x00007000),
-};
-
-enum nvram_layout_offsets {
-       NVR_SIG         = 0x00,         /* 0xAA, 0x55 */
-       NVR_SAS_ADDR    = 0x02,         /* 8-byte SAS address */
-};
-
-enum chip_flavors {
-       chip_6320,
-       chip_6440,
-       chip_6480,
-};
-
-enum port_type {
-       PORT_TYPE_SAS   =  (1L << 1),
-       PORT_TYPE_SATA  =  (1L << 0),
-};
-
-/* Command Table Format */
-enum ct_format {
-       /* SSP */
-       SSP_F_H         =  0x00,
-       SSP_F_IU        =  0x18,
-       SSP_F_MAX       =  0x4D,
-       /* STP */
-       STP_CMD_FIS     =  0x00,
-       STP_ATAPI_CMD   =  0x40,
-       STP_F_MAX       =  0x10,
-       /* SMP */
-       SMP_F_T         =  0x00,
-       SMP_F_DEP       =  0x01,
-       SMP_F_MAX       =  0x101,
-};
-
-enum status_buffer {
-       SB_EIR_OFF      =  0x00,        /* Error Information Record */
-       SB_RFB_OFF      =  0x08,        /* Response Frame Buffer */
-       SB_RFB_MAX      =  0x400,       /* RFB size */
-};
-
-enum error_info_rec {
-       CMD_ISS_STPD    = (1U << 31),   /* Cmd Issue Stopped */
-       CMD_PI_ERR      = (1U << 30),   /* Protection info error.  see flags2 */
-       RSP_OVER        = (1U << 29),   /* rsp buffer overflow */
-       RETRY_LIM       = (1U << 28),   /* FIS/frame retry limit exceeded */
-       UNK_FIS         = (1U << 27),   /* unknown FIS */
-       DMA_TERM        = (1U << 26),   /* DMA terminate primitive rx'd */
-       SYNC_ERR        = (1U << 25),   /* SYNC rx'd during frame xmit */
-       TFILE_ERR       = (1U << 24),   /* SATA taskfile Error bit set */
-       R_ERR           = (1U << 23),   /* SATA returned R_ERR prim */
-       RD_OFS          = (1U << 20),   /* Read DATA frame invalid offset */
-       XFER_RDY_OFS    = (1U << 19),   /* XFER_RDY offset error */
-       UNEXP_XFER_RDY  = (1U << 18),   /* unexpected XFER_RDY error */
-       DATA_OVER_UNDER = (1U << 16),   /* data overflow/underflow */
-       INTERLOCK       = (1U << 15),   /* interlock error */
-       NAK             = (1U << 14),   /* NAK rx'd */
-       ACK_NAK_TO      = (1U << 13),   /* ACK/NAK timeout */
-       CXN_CLOSED      = (1U << 12),   /* cxn closed w/out ack/nak */
-       OPEN_TO         = (1U << 11),   /* I_T nexus lost, open cxn timeout */
-       PATH_BLOCKED    = (1U << 10),   /* I_T nexus lost, pathway blocked */
-       NO_DEST         = (1U << 9),    /* I_T nexus lost, no destination */
-       STP_RES_BSY     = (1U << 8),    /* STP resources busy */
-       BREAK           = (1U << 7),    /* break received */
-       BAD_DEST        = (1U << 6),    /* bad destination */
-       BAD_PROTO       = (1U << 5),    /* protocol not supported */
-       BAD_RATE        = (1U << 4),    /* cxn rate not supported */
-       WRONG_DEST      = (1U << 3),    /* wrong destination error */
-       CREDIT_TO       = (1U << 2),    /* credit timeout */
-       WDOG_TO         = (1U << 1),    /* watchdog timeout */
-       BUF_PAR         = (1U << 0),    /* buffer parity error */
-};
-
-enum error_info_rec_2 {
-       SLOT_BSY_ERR    = (1U << 31),   /* Slot Busy Error */
-       GRD_CHK_ERR     = (1U << 14),   /* Guard Check Error */
-       APP_CHK_ERR     = (1U << 13),   /* Application Check error */
-       REF_CHK_ERR     = (1U << 12),   /* Reference Check Error */
-       USR_BLK_NM      = (1U << 0),    /* User Block Number */
-};
-
-struct mvs_chip_info {
-       u32             n_phy;
-       u32             srs_sz;
-       u32             slot_width;
-};
-
-struct mvs_err_info {
-       __le32                  flags;
-       __le32                  flags2;
-};
-
-struct mvs_prd {
-       __le64                  addr;           /* 64-bit buffer address */
-       __le32                  reserved;
-       __le32                  len;            /* 16-bit length */
-};
-
-struct mvs_cmd_hdr {
-       __le32                  flags;          /* PRD tbl len; SAS, SATA ctl */
-       __le32                  lens;           /* cmd, max resp frame len */
-       __le32                  tags;           /* targ port xfer tag; tag */
-       __le32                  data_len;       /* data xfer len */
-       __le64                  cmd_tbl;        /* command table address */
-       __le64                  open_frame;     /* open addr frame address */
-       __le64                  status_buf;     /* status buffer address */
-       __le64                  prd_tbl;        /* PRD tbl address */
-       __le32                  reserved[4];
-};
-
-struct mvs_port {
-       struct asd_sas_port     sas_port;
-       u8                      port_attached;
-       u8                      taskfileset;
-       u8                      wide_port_phymap;
-       struct list_head        list;
-};
-
-struct mvs_phy {
-       struct mvs_port         *port;
-       struct asd_sas_phy      sas_phy;
-       struct sas_identify     identify;
-       struct scsi_device      *sdev;
-       u64             dev_sas_addr;
-       u64             att_dev_sas_addr;
-       u32             att_dev_info;
-       u32             dev_info;
-       u32             phy_type;
-       u32             phy_status;
-       u32             irq_status;
-       u32             frame_rcvd_size;
-       u8              frame_rcvd[32];
-       u8              phy_attached;
-       enum sas_linkrate       minimum_linkrate;
-       enum sas_linkrate       maximum_linkrate;
-};
-
-struct mvs_slot_info {
-       struct list_head        list;
-       struct sas_task         *task;
-       u32                     n_elem;
-       u32                     tx;
-
-       /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
-        * and PRD table
-        */
-       void                    *buf;
-       dma_addr_t              buf_dma;
-#if _MV_DUMP
-       u32                     cmd_size;
-#endif
-
-       void                    *response;
-       struct mvs_port         *port;
-};
-
-struct mvs_info {
-       unsigned long           flags;
-
-       spinlock_t              lock;           /* host-wide lock */
-       struct pci_dev          *pdev;          /* our device */
-       void __iomem            *regs;          /* enhanced mode registers */
-       void __iomem            *peri_regs;     /* peripheral registers */
-
-       u8                      sas_addr[SAS_ADDR_SIZE];
-       struct sas_ha_struct    sas;            /* SCSI/SAS glue */
-       struct Scsi_Host        *shost;
-
-       __le32                  *tx;            /* TX (delivery) DMA ring */
-       dma_addr_t              tx_dma;
-       u32                     tx_prod;        /* cached next-producer idx */
-
-       __le32                  *rx;            /* RX (completion) DMA ring */
-       dma_addr_t              rx_dma;
-       u32                     rx_cons;        /* RX consumer idx */
-
-       __le32                  *rx_fis;        /* RX'd FIS area */
-       dma_addr_t              rx_fis_dma;
-
-       struct mvs_cmd_hdr      *slot;  /* DMA command header slots */
-       dma_addr_t              slot_dma;
-
-       const struct mvs_chip_info *chip;
-
-       u8                      tags[MVS_SLOTS];
-       struct mvs_slot_info    slot_info[MVS_SLOTS];
-                               /* further per-slot information */
-       struct mvs_phy          phy[MVS_MAX_PHYS];
-       struct mvs_port         port[MVS_MAX_PHYS];
-#ifdef MVS_USE_TASKLET
-       struct tasklet_struct   tasklet;
-#endif
-};
-
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
-                          void *funcdata);
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port);
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port);
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val);
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val);
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port);
-
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i);
-static void mvs_detect_porttype(struct mvs_info *mvi, int i);
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
-static void mvs_release_task(struct mvs_info *mvi, int phy_no);
-
-static int mvs_scan_finished(struct Scsi_Host *, unsigned long);
-static void mvs_scan_start(struct Scsi_Host *);
-static int mvs_slave_configure(struct scsi_device *sdev);
-
-static struct scsi_transport_template *mvs_stt;
-
-static const struct mvs_chip_info mvs_chips[] = {
-       [chip_6320] =           { 2, 16, 9  },
-       [chip_6440] =           { 4, 16, 9  },
-       [chip_6480] =           { 8, 32, 10 },
-};
-
-static struct scsi_host_template mvs_sht = {
-       .module                 = THIS_MODULE,
-       .name                   = DRV_NAME,
-       .queuecommand           = sas_queuecommand,
-       .target_alloc           = sas_target_alloc,
-       .slave_configure        = mvs_slave_configure,
-       .slave_destroy          = sas_slave_destroy,
-       .scan_finished          = mvs_scan_finished,
-       .scan_start             = mvs_scan_start,
-       .change_queue_depth     = sas_change_queue_depth,
-       .change_queue_type      = sas_change_queue_type,
-       .bios_param             = sas_bios_param,
-       .can_queue              = 1,
-       .cmd_per_lun            = 1,
-       .this_id                = -1,
-       .sg_tablesize           = SG_ALL,
-       .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
-       .use_clustering         = ENABLE_CLUSTERING,
-       .eh_device_reset_handler        = sas_eh_device_reset_handler,
-       .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
-       .slave_alloc            = sas_slave_alloc,
-       .target_destroy         = sas_target_destroy,
-       .ioctl                  = sas_ioctl,
-};
-
-static void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
-{
-       u32 i;
-       u32 run;
-       u32 offset;
-
-       offset = 0;
-       while (size) {
-               printk("%08X : ", baseaddr + offset);
-               if (size >= 16)
-                       run = 16;
-               else
-                       run = size;
-               size -= run;
-               for (i = 0; i < 16; i++) {
-                       if (i < run)
-                               printk("%02X ", (u32)data[i]);
-                       else
-                               printk("   ");
-               }
-               printk(": ");
-               for (i = 0; i < run; i++)
-                       printk("%c", isalnum(data[i]) ? data[i] : '.');
-               printk("\n");
-               data = &data[16];
-               offset += run;
-       }
-       printk("\n");
-}
-
-#if _MV_DUMP
-static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
-                                  enum sas_protocol proto)
-{
-       u32 offset;
-       struct pci_dev *pdev = mvi->pdev;
-       struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
-       offset = slot->cmd_size + MVS_OAF_SZ +
-           sizeof(struct mvs_prd) * slot->n_elem;
-       dev_printk(KERN_DEBUG, &pdev->dev, "+---->Status buffer[%d] :\n",
-                       tag);
-       mvs_hexdump(32, (u8 *) slot->response,
-                   (u32) slot->buf_dma + offset);
-}
-#endif
-
-static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
-                               enum sas_protocol proto)
-{
-#if _MV_DUMP
-       u32 sz, w_ptr;
-       u64 addr;
-       void __iomem *regs = mvi->regs;
-       struct pci_dev *pdev = mvi->pdev;
-       struct mvs_slot_info *slot = &mvi->slot_info[tag];
-
-       /* Delivery Queue */
-       sz = mr32(TX_CFG) & TX_RING_SZ_MASK;
-       w_ptr = slot->tx;
-       addr = (u64)mr32(TX_HI) << 16 << 16 | mr32(TX_LO);
-       dev_printk(KERN_DEBUG, &pdev->dev,
-               "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
-       dev_printk(KERN_DEBUG, &pdev->dev,
-               "Delivery Queue Base Address=0x%llX (PA)"
-               "(tx_dma=0x%llX), Entry=%04d\n",
-               addr, mvi->tx_dma, w_ptr);
-       mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
-                       (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
-       /* Command List */
-       addr = mvi->slot_dma;
-       dev_printk(KERN_DEBUG, &pdev->dev,
-               "Command List Base Address=0x%llX (PA)"
-               "(slot_dma=0x%llX), Header=%03d\n",
-               addr, slot->buf_dma, tag);
-       dev_printk(KERN_DEBUG, &pdev->dev, "Command Header[%03d]:\n", tag);
-       /* mvs_cmd_hdr */
-       mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
-               (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
-       /* 1. command table area */
-       dev_printk(KERN_DEBUG, &pdev->dev, "+---->Command Table :\n");
-       mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
-       /* 2. open address frame area */
-       dev_printk(KERN_DEBUG, &pdev->dev, "+---->Open Address Frame :\n");
-       mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
-                               (u32) slot->buf_dma + slot->cmd_size);
-       /* 3. status buffer */
-       mvs_hba_sb_dump(mvi, tag, proto);
-       /* 4. PRD table */
-       dev_printk(KERN_DEBUG, &pdev->dev, "+---->PRD table :\n");
-       mvs_hexdump(sizeof(struct mvs_prd) * slot->n_elem,
-               (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
-               (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
-#endif
-}
-
-static void mvs_hba_cq_dump(struct mvs_info *mvi)
-{
-#if (_MV_DUMP > 2)
-       u64 addr;
-       void __iomem *regs = mvi->regs;
-       struct pci_dev *pdev = mvi->pdev;
-       u32 entry = mvi->rx_cons + 1;
-       u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
-
-       /* Completion Queue */
-       addr = (u64)mr32(RX_HI) << 16 << 16 | mr32(RX_LO);
-       dev_printk(KERN_DEBUG, &pdev->dev, "Completion Task = 0x%p\n",
-                  mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
-       dev_printk(KERN_DEBUG, &pdev->dev,
-               "Completion List Base Address=0x%llX (PA), "
-               "CQ_Entry=%04d, CQ_WP=0x%08X\n",
-               addr, entry - 1, le32_to_cpu(mvi->rx[0]));
-       mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
-                   mvi->rx_dma + sizeof(u32) * entry);
-#endif
-}
-
-static void mvs_hba_interrupt_enable(struct mvs_info *mvi)
-{
-       void __iomem *regs = mvi->regs;
-       u32 tmp;
-
-       tmp = mr32(GBL_CTL);
-
-       mw32(GBL_CTL, tmp | INT_EN);
-}
-
-static void mvs_hba_interrupt_disable(struct mvs_info *mvi)
-{
-       void __iomem *regs = mvi->regs;
-       u32 tmp;
-
-       tmp = mr32(GBL_CTL);
-
-       mw32(GBL_CTL, tmp & ~INT_EN);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
-
-/* move to PCI layer or libata core? */
-static int pci_go_64(struct pci_dev *pdev)
-{
-       int rc;
-
-       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
-               if (rc) {
-                       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-                       if (rc) {
-                               dev_printk(KERN_ERR, &pdev->dev,
-                                          "64-bit DMA enable failed\n");
-                               return rc;
-                       }
-               }
-       } else {
-               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_printk(KERN_ERR, &pdev->dev,
-                                  "32-bit DMA enable failed\n");
-                       return rc;
-               }
-               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
-               if (rc) {
-                       dev_printk(KERN_ERR, &pdev->dev,
-                                  "32-bit consistent DMA enable failed\n");
-                       return rc;
-               }
-       }
-
-       return rc;
-}
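pci_go_64() above is the then-standard 64-bit-DMA-with-32-bit-fallback setup. Later kernels express the same policy with dma_set_mask_and_coherent(); a sketch of the equivalent, assuming a kernel recent enough to provide that helper (not applicable to the tree this diff comes from):

        /* sketch only: consolidated DMA-mask setup on later kernels */
        static int pci_go_64_sketch(struct pci_dev *pdev)
        {
                int rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

                if (rc)
                        rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
                if (rc)
                        dev_printk(KERN_ERR, &pdev->dev, "DMA enable failed\n");
                return rc;
        }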
-
-static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
-{
-       if (task->lldd_task) {
-               struct mvs_slot_info *slot;
-               slot = (struct mvs_slot_info *) task->lldd_task;
-               *tag = slot - mvi->slot_info;
-               return 1;
-       }
-       return 0;
-}
-
-static void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
-{
-       void *bitmap = (void *) &mvi->tags;
-       clear_bit(tag, bitmap);
-}
-
-static void mvs_tag_free(struct mvs_info *mvi, u32 tag)
-{
-       mvs_tag_clear(mvi, tag);
-}
-
-static void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
-{
-       void *bitmap = (void *) &mvi->tags;
-       set_bit(tag, bitmap);
-}
-
-static int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
-{
-       unsigned int index, tag;
-       void *bitmap = (void *) &mvi->tags;
-
-       index = find_first_zero_bit(bitmap, MVS_SLOTS);
-       tag = index;
-       if (tag >= MVS_SLOTS)
-               return -SAS_QUEUE_FULL;
-       mvs_tag_set(mvi, tag);
-       *tag_out = tag;
-       return 0;
-}
-
-static void mvs_tag_init(struct mvs_info *mvi)
-{
-       int i;
-       for (i = 0; i < MVS_SLOTS; ++i)
-               mvs_tag_clear(mvi, i);
-}
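Together these helpers form a first-fit bitmap allocator over the MVS_SLOTS command slots. A sketch of the intended lifecycle (editorial, error handling abbreviated):

        u32 tag;

        if (mvs_tag_alloc(mvi, &tag))   /* -SAS_QUEUE_FULL when no slot free */
                return -EBUSY;
        /* ... fill mvi->slot_info[tag] and deliver the command ... */
        mvs_tag_free(mvi, tag);         /* release the slot on completion */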
-
-#ifndef MVS_DISABLE_NVRAM
-static int mvs_eep_read(void __iomem *regs, u32 addr, u32 *data)
-{
-       int timeout = 1000;
-
-       if (addr & ~SPI_ADDR_MASK)
-               return -EINVAL;
-
-       writel(addr, regs + SPI_CMD);
-       writel(TWSI_RD, regs + SPI_CTL);
-
-       while (timeout-- > 0) {
-               if (readl(regs + SPI_CTL) & TWSI_RDY) {
-                       *data = readl(regs + SPI_DATA);
-                       return 0;
-               }
-
-               udelay(10);
-       }
-
-       return -EBUSY;
-}
-
-static int mvs_eep_read_buf(void __iomem *regs, u32 addr,
-                           void *buf, u32 buflen)
-{
-       u32 addr_end, tmp_addr, i, j;
-       u32 tmp = 0;
-       int rc;
-       u8 *tmp8, *buf8 = buf;
-
-       addr_end = addr + buflen;
-       tmp_addr = ALIGN(addr, 4);
-       if (addr > 0xff)
-               return -EINVAL;
-
-       j = addr & 0x3;
-       if (j) {
-               rc = mvs_eep_read(regs, tmp_addr, &tmp);
-               if (rc)
-                       return rc;
-
-               tmp8 = (u8 *)&tmp;
-               for (i = j; i < 4; i++)
-                       *buf8++ = tmp8[i];
-
-               tmp_addr += 4;
-       }
-
-       for (j = ALIGN(addr_end, 4); tmp_addr < j; tmp_addr += 4) {
-               rc = mvs_eep_read(regs, tmp_addr, &tmp);
-               if (rc)
-                       return rc;
-
-               memcpy(buf8, &tmp, 4);
-               buf8 += 4;
-       }
-
-       if (tmp_addr < addr_end) {
-               rc = mvs_eep_read(regs, tmp_addr, &tmp);
-               if (rc)
-                       return rc;
-
-               tmp8 = (u8 *)&tmp;
-               j = addr_end - tmp_addr;
-               for (i = 0; i < j; i++)
-                       *buf8++ = tmp8[i];
-
-               tmp_addr += 4;
-       }
-
-       return 0;
-}
-#endif
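mvs_eep_read_buf() assembles byte-granular reads on top of the EEPROM's 32-bit word interface in up to three phases: a partial leading word, whole aligned words, and a partial trailing word. A worked walk-through for an aligned case (editorial, tracing the code above):

        /* mvs_eep_read_buf(regs, 0x04, buf, 8):
         *   head phase: 0x04 & 0x3 == 0, skipped
         *   body phase: word reads at 0x04 and 0x08, 4 bytes copied each
         *   tail phase: tmp_addr (0x0c) == addr_end (0x0c), skipped
         */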
-
-static int mvs_nvram_read(struct mvs_info *mvi, u32 addr,
-                         void *buf, u32 buflen)
-{
-#ifndef MVS_DISABLE_NVRAM
-       void __iomem *regs = mvi->regs;
-       int rc, i;
-       u32 sum;
-       u8 hdr[2], *tmp;
-       const char *msg;
-
-       rc = mvs_eep_read_buf(regs, addr, &hdr, 2);
-       if (rc) {
-               msg = "nvram hdr read failed";
-               goto err_out;
-       }
-       rc = mvs_eep_read_buf(regs, addr + 2, buf, buflen);
-       if (rc) {
-               msg = "nvram read failed";
-               goto err_out;
-       }
-
-       if (hdr[0] != 0x5A) {
-               /* entry id */
-               msg = "invalid nvram entry id";
-               rc = -ENOENT;
-               goto err_out;
-       }
-
-       tmp = buf;
-       sum = ((u32)hdr[0]) + ((u32)hdr[1]);
-       for (i = 0; i < buflen; i++)
-               sum += ((u32)tmp[i]);
-
-       if (sum) {
-               msg = "nvram checksum failure";
-               rc = -EILSEQ;
-               goto err_out;
-       }
-
-       return 0;
-
-err_out:
-       dev_printk(KERN_ERR, &mvi->pdev->dev, "%s\n", msg);
-       return rc;
-#else
-       /* FIXME: for SAS target mode */
-       memcpy(buf, "\x50\x05\x04\x30\x11\xab\x00\x00", 8);
-       return 0;
-#endif
-}
-
-static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
-{
-       struct mvs_phy *phy = &mvi->phy[i];
-       struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
-       if (!phy->phy_attached)
-               return;
-
-       if (sas_phy->phy) {
-               struct sas_phy *sphy = sas_phy->phy;
-
-               sphy->negotiated_linkrate = sas_phy->linkrate;
-               sphy->minimum_linkrate = phy->minimum_linkrate;
-               sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
-               sphy->maximum_linkrate = phy->maximum_linkrate;
-               sphy->maximum_linkrate_hw = SAS_LINK_RATE_3_0_GBPS;
-       }
-
-       if (phy->phy_type & PORT_TYPE_SAS) {
-               struct sas_identify_frame *id;
-
-               id = (struct sas_identify_frame *)phy->frame_rcvd;
-               id->dev_type = phy->identify.device_type;
-               id->initiator_bits = SAS_PROTOCOL_ALL;
-               id->target_bits = phy->identify.target_port_protocols;
-       } else if (phy->phy_type & PORT_TYPE_SATA) {
-               /* TODO */
-       }
-       mvi->sas.sas_phy[i]->frame_rcvd_size = phy->frame_rcvd_size;
-       mvi->sas.notify_port_event(mvi->sas.sas_phy[i],
-                                  PORTE_BYTES_DMAED);
-}
-
-static int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
-{
-       /* give the phy enabling interrupt event time to come in (1s
-        * is empirically about all it takes) */
-       if (time < HZ)
-               return 0;
-       /* Wait for discovery to finish */
-       scsi_flush_work(shost);
-       return 1;
-}
-
-static void mvs_scan_start(struct Scsi_Host *shost)
-{
-       int i;
-       struct mvs_info *mvi = SHOST_TO_SAS_HA(shost)->lldd_ha;
-
-       for (i = 0; i < mvi->chip->n_phy; ++i) {
-               mvs_bytes_dmaed(mvi, i);
-       }
-}
-
-static int mvs_slave_configure(struct scsi_device *sdev)
-{
-       struct domain_device *dev = sdev_to_domain_dev(sdev);
-       int ret = sas_slave_configure(sdev);
-
-       if (ret)
-               return ret;
-
-       if (dev_is_sata(dev)) {
-               /* struct ata_port *ap = dev->sata_dev.ap; */
-               /* struct ata_device *adev = ap->link.device; */
-
-               /* clamp at no NCQ for the time being */
-               /* adev->flags |= ATA_DFLAG_NCQ_OFF; */
-               scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
-       }
-       return 0;
-}
-
-static void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
-{
-       struct pci_dev *pdev = mvi->pdev;
-       struct sas_ha_struct *sas_ha = &mvi->sas;
-       struct mvs_phy *phy = &mvi->phy[phy_no];
-       struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-       phy->irq_status = mvs_read_port_irq_stat(mvi, phy_no);
-       /*
-        * 'events' carries the port-level event; we still need to
-        * check the per-port interrupt status.
-        */
-       dev_printk(KERN_DEBUG, &pdev->dev,
-               "Port %d Event = %X\n",
-               phy_no, phy->irq_status);
-
-       if (phy->irq_status & (PHYEV_POOF | PHYEV_DEC_ERR)) {
-               mvs_release_task(mvi, phy_no);
-               if (!mvs_is_phy_ready(mvi, phy_no)) {
-                       sas_phy_disconnected(sas_phy);
-                       sas_ha->notify_phy_event(sas_phy, PHYE_LOSS_OF_SIGNAL);
-                       dev_printk(KERN_INFO, &pdev->dev,
-                               "Port %d Unplug Notice\n", phy_no);
-
-               } else
-                       mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET, NULL);
-       }
-       if (!(phy->irq_status & PHYEV_DEC_ERR)) {
-               if (phy->irq_status & PHYEV_COMWAKE) {
-                       u32 tmp = mvs_read_port_irq_mask(mvi, phy_no);
-                       mvs_write_port_irq_mask(mvi, phy_no,
-                                               tmp | PHYEV_SIG_FIS);
-               }
-               if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
-                       phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
-                       if (phy->phy_status) {
-                               mvs_detect_porttype(mvi, phy_no);
-
-                               if (phy->phy_type & PORT_TYPE_SATA) {
-                                       u32 tmp = mvs_read_port_irq_mask(mvi,
-                                                               phy_no);
-                                       tmp &= ~PHYEV_SIG_FIS;
-                                       mvs_write_port_irq_mask(mvi,
-                                                               phy_no, tmp);
-                               }
-
-                               mvs_update_phyinfo(mvi, phy_no, 0);
-                               sas_ha->notify_phy_event(sas_phy,
-                                                       PHYE_OOB_DONE);
-                               mvs_bytes_dmaed(mvi, phy_no);
-                       } else {
-                               dev_printk(KERN_DEBUG, &pdev->dev,
-                                       "plugin interrupt but phy is gone\n");
-                               mvs_phy_control(sas_phy, PHY_FUNC_LINK_RESET,
-                                                       NULL);
-                       }
-               } else if (phy->irq_status & PHYEV_BROAD_CH) {
-                       mvs_release_task(mvi, phy_no);
-                       sas_ha->notify_port_event(sas_phy,
-                                               PORTE_BROADCAST_RCVD);
-               }
-       }
-       mvs_write_port_irq_stat(mvi, phy_no, phy->irq_status);
-}
-
-static void mvs_int_sata(struct mvs_info *mvi)
-{
-       u32 tmp;
-       void __iomem *regs = mvi->regs;
-       tmp = mr32(INT_STAT_SRS);
-       mw32(INT_STAT_SRS, tmp & 0xFFFF);
-}
-
-static void mvs_slot_reset(struct mvs_info *mvi, struct sas_task *task,
-                               u32 slot_idx)
-{
-       void __iomem *regs = mvi->regs;
-       struct domain_device *dev = task->dev;
-       struct asd_sas_port *sas_port = dev->port;
-       struct mvs_port *port = mvi->slot_info[slot_idx].port;
-       u32 reg_set, phy_mask;
-
-       if (!sas_protocol_ata(task->task_proto)) {
-               reg_set = 0;
-               phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
-                               sas_port->phy_mask;
-       } else {
-               reg_set = port->taskfileset;
-               phy_mask = sas_port->phy_mask;
-       }
-       mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | slot_idx |
-                                       (TXQ_CMD_SLOT_RESET << TXQ_CMD_SHIFT) |
-                                       (phy_mask << TXQ_PHY_SHIFT) |
-                                       (reg_set << TXQ_SRS_SHIFT));
-
-       mw32(TX_PROD_IDX, mvi->tx_prod);
-       mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-}
-
-static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
-                       u32 slot_idx, int err)
-{
-       struct mvs_port *port = mvi->slot_info[slot_idx].port;
-       struct task_status_struct *tstat = &task->task_status;
-       struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
-       int stat = SAM_GOOD;
-
-       resp->frame_len = sizeof(struct dev_to_host_fis);
-       memcpy(&resp->ending_fis[0],
-              SATA_RECEIVED_D2H_FIS(port->taskfileset),
-              sizeof(struct dev_to_host_fis));
-       tstat->buf_valid_size = sizeof(*resp);
-       if (unlikely(err))
-               stat = SAS_PROTO_RESPONSE;
-       return stat;
-}
-
-static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
-{
-       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
-       mvs_tag_clear(mvi, slot_idx);
-}
-
-static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
-                         struct mvs_slot_info *slot, u32 slot_idx)
-{
-       if (!sas_protocol_ata(task->task_proto))
-               if (slot->n_elem)
-                       pci_unmap_sg(mvi->pdev, task->scatter,
-                                    slot->n_elem, task->data_dir);
-
-       switch (task->task_proto) {
-       case SAS_PROTOCOL_SMP:
-               pci_unmap_sg(mvi->pdev, &task->smp_task.smp_resp, 1,
-                            PCI_DMA_FROMDEVICE);
-               pci_unmap_sg(mvi->pdev, &task->smp_task.smp_req, 1,
-                            PCI_DMA_TODEVICE);
-               break;
-
-       case SAS_PROTOCOL_SATA:
-       case SAS_PROTOCOL_STP:
-       case SAS_PROTOCOL_SSP:
-       default:
-               /* do nothing */
-               break;
-       }
-       list_del(&slot->list);
-       task->lldd_task = NULL;
-       slot->task = NULL;
-       slot->port = NULL;
-}
-
-static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
-                        u32 slot_idx)
-{
-       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-       u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
-       u32 err_dw1 = le32_to_cpu(*(u32 *) (slot->response + 4));
-       int stat = SAM_CHECK_COND;
-
-       if (err_dw1 & SLOT_BSY_ERR) {
-               stat = SAS_QUEUE_FULL;
-               mvs_slot_reset(mvi, task, slot_idx);
-       }
-       switch (task->task_proto) {
-       case SAS_PROTOCOL_SSP:
-               break;
-       case SAS_PROTOCOL_SMP:
-               break;
-       case SAS_PROTOCOL_SATA:
-       case SAS_PROTOCOL_STP:
-       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-               if (err_dw0 & TFILE_ERR)
-                       stat = mvs_sata_done(mvi, task, slot_idx, 1);
-               break;
-       default:
-               break;
-       }
-
-       mvs_hexdump(16, (u8 *) slot->response, 0);
-       return stat;
-}
-
-static int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
-{
-       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
-       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
-       struct sas_task *task = slot->task;
-       struct task_status_struct *tstat;
-       struct mvs_port *port;
-       bool aborted;
-       void *to;
-
-       if (unlikely(!task || !task->lldd_task))
-               return -1;
-
-       mvs_hba_cq_dump(mvi);
-
-       spin_lock(&task->task_state_lock);
-       aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
-       if (!aborted) {
-               task->task_state_flags &=
-                   ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
-               task->task_state_flags |= SAS_TASK_STATE_DONE;
-       }
-       spin_unlock(&task->task_state_lock);
-
-       if (aborted) {
-               mvs_slot_task_free(mvi, task, slot, slot_idx);
-               mvs_slot_free(mvi, rx_desc);
-               return -1;
-       }
-
-       port = slot->port;
-       tstat = &task->task_status;
-       memset(tstat, 0, sizeof(*tstat));
-       tstat->resp = SAS_TASK_COMPLETE;
-
-       if (unlikely(!port->port_attached || flags)) {
-               mvs_slot_err(mvi, task, slot_idx);
-               if (!sas_protocol_ata(task->task_proto))
-                       tstat->stat = SAS_PHY_DOWN;
-               goto out;
-       }
-
-       /* error info record present */
-       if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
-               tstat->stat = mvs_slot_err(mvi, task, slot_idx);
-               goto out;
-       }
-
-       switch (task->task_proto) {
-       case SAS_PROTOCOL_SSP:
-               /* hw says status == 0, datapres == 0 */
-               if (rx_desc & RXQ_GOOD) {
-                       tstat->stat = SAM_GOOD;
-                       tstat->resp = SAS_TASK_COMPLETE;
-               }
-               /* response frame present */
-               else if (rx_desc & RXQ_RSP) {
-                       struct ssp_response_iu *iu =
-                           slot->response + sizeof(struct mvs_err_info);
-                       sas_ssp_task_response(&mvi->pdev->dev, task, iu);
-               }
-
-               /* should never happen? */
-               else
-                       tstat->stat = SAM_CHECK_COND;
-               break;
-
-       case SAS_PROTOCOL_SMP: {
-                       struct scatterlist *sg_resp = &task->smp_task.smp_resp;
-                       tstat->stat = SAM_GOOD;
-                       to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
-                       memcpy(to + sg_resp->offset,
-                               slot->response + sizeof(struct mvs_err_info),
-                               sg_dma_len(sg_resp));
-                       kunmap_atomic(to, KM_IRQ0);
-                       break;
-               }
-
-       case SAS_PROTOCOL_SATA:
-       case SAS_PROTOCOL_STP:
-       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
-                       tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
-                       break;
-               }
-
-       default:
-               tstat->stat = SAM_CHECK_COND;
-               break;
-       }
-
-out:
-       mvs_slot_task_free(mvi, task, slot, slot_idx);
-       if (unlikely(tstat->stat != SAS_QUEUE_FULL))
-               mvs_slot_free(mvi, rx_desc);
-
-       spin_unlock(&mvi->lock);
-       task->task_done(task);
-       spin_lock(&mvi->lock);
-       return tstat->stat;
-}
-
-static void mvs_release_task(struct mvs_info *mvi, int phy_no)
-{
-       struct list_head *pos, *n;
-       struct mvs_slot_info *slot;
-       struct mvs_phy *phy = &mvi->phy[phy_no];
-       struct mvs_port *port = phy->port;
-       u32 rx_desc;
-
-       if (!port)
-               return;
-
-       list_for_each_safe(pos, n, &port->list) {
-               slot = container_of(pos, struct mvs_slot_info, list);
-               rx_desc = (u32) (slot - mvi->slot_info);
-               mvs_slot_complete(mvi, rx_desc, 1);
-       }
-}
-
-static void mvs_int_full(struct mvs_info *mvi)
-{
-       void __iomem *regs = mvi->regs;
-       u32 tmp, stat;
-       int i;
-
-       stat = mr32(INT_STAT);
-
-       mvs_int_rx(mvi, false);
-
-       for (i = 0; i < MVS_MAX_PORTS; i++) {
-               tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
-               if (tmp)
-                       mvs_int_port(mvi, i, tmp);
-       }
-
-       if (stat & CINT_SRS)
-               mvs_int_sata(mvi);
-
-       mw32(INT_STAT, stat);
-}
-
-static int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
-{
-       void __iomem *regs = mvi->regs;
-       u32 rx_prod_idx, rx_desc;
-       bool attn = false;
-       struct pci_dev *pdev = mvi->pdev;
-
-       /* the first dword in the RX ring is special: it contains
-        * a mirror of the hardware's RX producer index, so that
-        * we don't have to stall the CPU reading that register.
-        * The actual RX ring is offset by one dword, due to this.
-        */
-       rx_prod_idx = mvi->rx_cons;
-       mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
-       if (mvi->rx_cons == 0xfff)      /* h/w hasn't touched RX ring yet */
-               return 0;
-
-       /* The CMPL_Q update may arrive late; re-read the consumer
-        * index from the register and try again.
-        * Note: with interrupt coalescing enabled, the register must
-        * be read every time.
-        */
-       if (mvi->rx_cons == rx_prod_idx)
-               mvi->rx_cons = mr32(RX_CONS_IDX) & RX_RING_SZ_MASK;
-
-       if (mvi->rx_cons == rx_prod_idx)
-               return 0;
-
-       while (mvi->rx_cons != rx_prod_idx) {
-
-               /* increment our internal RX consumer pointer */
-               rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
-
-               rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
-
-               if (likely(rx_desc & RXQ_DONE))
-                       mvs_slot_complete(mvi, rx_desc, 0);
-               if (rx_desc & RXQ_ATTN) {
-                       attn = true;
-                       dev_printk(KERN_DEBUG, &pdev->dev, "ATTN %X\n",
-                               rx_desc);
-               } else if (rx_desc & RXQ_ERR) {
-                       if (!(rx_desc & RXQ_DONE))
-                               mvs_slot_complete(mvi, rx_desc, 0);
-                       dev_printk(KERN_DEBUG, &pdev->dev, "RXQ_ERR %X\n",
-                               rx_desc);
-               } else if (rx_desc & RXQ_SLOT_RESET) {
-                       dev_printk(KERN_DEBUG, &pdev->dev, "Slot reset[%X]\n",
-                               rx_desc);
-                       mvs_slot_free(mvi, rx_desc);
-               }
-       }
-
-       if (attn && self_clear)
-               mvs_int_full(mvi);
-
-       return 0;
-}
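Concretely, the mirrored index described at the top of mvs_int_rx() means entry i of the completion ring lives at mvi->rx[i + 1], while mvi->rx[0] is kept current by hardware DMA so the consumer can usually skip the MMIO read. A minimal illustration (i is a hypothetical ring index):

        u32 prod = le32_to_cpu(mvi->rx[0]);     /* mirrored producer index */
        u32 desc = le32_to_cpu(mvi->rx[i + 1]); /* descriptor for ring entry i */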
-
-#ifdef MVS_USE_TASKLET
-static void mvs_tasklet(unsigned long data)
-{
-       struct mvs_info *mvi = (struct mvs_info *) data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&mvi->lock, flags);
-
-#ifdef MVS_DISABLE_MSI
-       mvs_int_full(mvi);
-#else
-       mvs_int_rx(mvi, true);
-#endif
-       spin_unlock_irqrestore(&mvi->lock, flags);
-}
-#endif
-
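-/* Top-half IRQ handler: acknowledge CINT_DONE right away, then either run
- * the full handler under the lock or defer to the tasklet.
- */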
-static irqreturn_t mvs_interrupt(int irq, void *opaque)
-{
-       struct mvs_info *mvi = opaque;
-       void __iomem *regs = mvi->regs;
-       u32 stat;
-
-       stat = mr32(GBL_INT_STAT);
-
-       if (stat == 0 || stat == 0xffffffff)
-               return IRQ_NONE;
-
-       /* clear CMD_CMPLT ASAP */
-       mw32_f(INT_STAT, CINT_DONE);
-
-#ifndef MVS_USE_TASKLET
-       spin_lock(&mvi->lock);
-
-       mvs_int_full(mvi);
-
-       spin_unlock(&mvi->lock);
-#else
-       tasklet_schedule(&mvi->tasklet);
-#endif
-       return IRQ_HANDLED;
-}
-
-#ifndef MVS_DISABLE_MSI
-static irqreturn_t mvs_msi_interrupt(int irq, void *opaque)
-{
-       struct mvs_info *mvi = opaque;
-
-#ifndef MVS_USE_TASKLET
-       spin_lock(&mvi->lock);
-
-       mvs_int_rx(mvi, true);
-
-       spin_unlock(&mvi->lock);
-#else
-       tasklet_schedule(&mvi->tasklet);
-#endif
-       return IRQ_HANDLED;
-}
-#endif
-
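-/* Per-command context handed from mvs_task_exec() to the protocol-specific
- * prep routines below.
- */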
-struct mvs_task_exec_info {
-       struct sas_task *task;
-       struct mvs_cmd_hdr *hdr;
-       struct mvs_port *port;
-       u32 tag;
-       int n_elem;
-};
-
-static int mvs_task_prep_smp(struct mvs_info *mvi,
-                            struct mvs_task_exec_info *tei)
-{
-       int elem, rc, i;
-       struct sas_task *task = tei->task;
-       struct mvs_cmd_hdr *hdr = tei->hdr;
-       struct scatterlist *sg_req, *sg_resp;
-       u32 req_len, resp_len, tag = tei->tag;
-       void *buf_tmp;
-       u8 *buf_oaf;
-       dma_addr_t buf_tmp_dma;
-       struct mvs_prd *buf_prd;
-       struct scatterlist *sg;
-       struct mvs_slot_info *slot = &mvi->slot_info[tag];
-       struct asd_sas_port *sas_port = task->dev->port;
-       u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-#if _MV_DUMP
-       u8 *buf_cmd;
-       void *from;
-#endif
-       /*
-        * DMA-map SMP request, response buffers
-        */
-       sg_req = &task->smp_task.smp_req;
-       elem = pci_map_sg(mvi->pdev, sg_req, 1, PCI_DMA_TODEVICE);
-       if (!elem)
-               return -ENOMEM;
-       req_len = sg_dma_len(sg_req);
-
-       sg_resp = &task->smp_task.smp_resp;
-       elem = pci_map_sg(mvi->pdev, sg_resp, 1, PCI_DMA_FROMDEVICE);
-       if (!elem) {
-               rc = -ENOMEM;
-               goto err_out;
-       }
-       resp_len = sg_dma_len(sg_resp);
-
-       /* must be in dwords */
-       if ((req_len & 0x3) || (resp_len & 0x3)) {
-               rc = -EINVAL;
-               goto err_out_2;
-       }
-
-       /*
-        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-        */
-
-       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
-       buf_tmp = slot->buf;
-       buf_tmp_dma = slot->buf_dma;
-
-#if _MV_DUMP
-       buf_cmd = buf_tmp;
-       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-       buf_tmp += req_len;
-       buf_tmp_dma += req_len;
-       slot->cmd_size = req_len;
-#else
-       hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
-#endif
-
-       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-       buf_oaf = buf_tmp;
-       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-       buf_tmp += MVS_OAF_SZ;
-       buf_tmp_dma += MVS_OAF_SZ;
-
-       /* region 3: PRD table ********************************************* */
-       buf_prd = buf_tmp;
-       if (tei->n_elem)
-               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-       else
-               hdr->prd_tbl = 0;
-
-       i = sizeof(struct mvs_prd) * tei->n_elem;
-       buf_tmp += i;
-       buf_tmp_dma += i;
-
-       /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
-       slot->response = buf_tmp;
-       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-       /*
-        * Fill in TX ring and command slot header
-        */
-       slot->tx = mvi->tx_prod;
-       mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
-                                       TXQ_MODE_I | tag |
-                                       (sas_port->phy_mask << TXQ_PHY_SHIFT));
-
-       hdr->flags |= flags;
-       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
-       hdr->tags = cpu_to_le32(tag);
-       hdr->data_len = 0;
-
-       /* generate open address frame hdr (first 12 bytes) */
-       buf_oaf[0] = (1 << 7) | (0 << 4) | 0x01; /* initiator, SMP, ftype 1h */
-       buf_oaf[1] = task->dev->linkrate & 0xf;
-       *(u16 *)(buf_oaf + 2) = 0xFFFF;         /* SAS SPEC */
-       memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-       /* fill in PRD (scatter/gather) table, if any */
-       for_each_sg(task->scatter, sg, tei->n_elem, i) {
-               buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-               buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-               buf_prd++;
-       }
-
-#if _MV_DUMP
-       /* copy cmd table */
-       from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
-       memcpy(buf_cmd, from + sg_req->offset, req_len);
-       kunmap_atomic(from, KM_IRQ0);
-#endif
-       return 0;
-
-err_out_2:
-       pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_resp, 1,
-                    PCI_DMA_FROMDEVICE);
-err_out:
-       pci_unmap_sg(mvi->pdev, &tei->task->smp_task.smp_req, 1,
-                    PCI_DMA_TODEVICE);
-       return rc;
-}
-
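-/* Release the SATA register set (SRS) held by a port; sets 0-15 are
- * controlled through PCS, sets 16 and up through CTL.
- */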
-static void mvs_free_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
-       void __iomem *regs = mvi->regs;
-       u32 tmp, offs;
-       u8 *tfs = &port->taskfileset;
-
-       if (*tfs == MVS_ID_NOT_MAPPED)
-               return;
-
-       offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
-       if (*tfs < 16) {
-               tmp = mr32(PCS);
-               mw32(PCS, tmp & ~offs);
-       } else {
-               tmp = mr32(CTL);
-               mw32(CTL, tmp & ~offs);
-       }
-
-       tmp = mr32(INT_STAT_SRS) & (1U << *tfs);
-       if (tmp)
-               mw32(INT_STAT_SRS, tmp);
-
-       *tfs = MVS_ID_NOT_MAPPED;
-}
-
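-/* Claim a free SATA register set for a port, or return MVS_ID_NOT_MAPPED
- * if every set is in use.
- */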
-static u8 mvs_assign_reg_set(struct mvs_info *mvi, struct mvs_port *port)
-{
-       int i;
-       u32 tmp, offs;
-       void __iomem *regs = mvi->regs;
-
-       if (port->taskfileset != MVS_ID_NOT_MAPPED)
-               return 0;
-
-       tmp = mr32(PCS);
-
-       for (i = 0; i < mvi->chip->srs_sz; i++) {
-               if (i == 16)
-                       tmp = mr32(CTL);
-               offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
-               if (!(tmp & offs)) {
-                       port->taskfileset = i;
-
-                       if (i < 16)
-                               mw32(PCS, tmp | offs);
-                       else
-                               mw32(CTL, tmp | offs);
-                       tmp = mr32(INT_STAT_SRS) & (1U << i);
-                       if (tmp)
-                               mw32(INT_STAT_SRS, tmp);
-                       return 0;
-               }
-       }
-       return MVS_ID_NOT_MAPPED;
-}
-
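-/* For FPDMA (NCQ) commands, fetch the libata queued-command tag so the
- * hardware can reuse it; returns 1 if a tag was obtained.
- */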
-static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
-{
-       struct ata_queued_cmd *qc = task->uldd_task;
-
-       if (qc) {
-               if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
-                       qc->tf.command == ATA_CMD_FPDMA_READ) {
-                       *tag = qc->tag;
-                       return 1;
-               }
-       }
-
-       return 0;
-}
-
-static int mvs_task_prep_ata(struct mvs_info *mvi,
-                            struct mvs_task_exec_info *tei)
-{
-       struct sas_task *task = tei->task;
-       struct domain_device *dev = task->dev;
-       struct mvs_cmd_hdr *hdr = tei->hdr;
-       struct asd_sas_port *sas_port = dev->port;
-       struct mvs_slot_info *slot;
-       struct scatterlist *sg;
-       struct mvs_prd *buf_prd;
-       struct mvs_port *port = tei->port;
-       u32 tag = tei->tag;
-       u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
-       void *buf_tmp;
-       u8 *buf_cmd, *buf_oaf;
-       dma_addr_t buf_tmp_dma;
-       u32 i, req_len, resp_len;
-       const u32 max_resp_len = SB_RFB_MAX;
-
-       if (mvs_assign_reg_set(mvi, port) == MVS_ID_NOT_MAPPED)
-               return -EBUSY;
-
-       slot = &mvi->slot_info[tag];
-       slot->tx = mvi->tx_prod;
-       mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
-                                       (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
-                                       (sas_port->phy_mask << TXQ_PHY_SHIFT) |
-                                       (port->taskfileset << TXQ_SRS_SHIFT));
-
-       if (task->ata_task.use_ncq)
-               flags |= MCH_FPDMA;
-       if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
-               if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
-                       flags |= MCH_ATAPI;
-       }
-
-       /* FIXME: fill in port multiplier number */
-
-       hdr->flags = cpu_to_le32(flags);
-
-       /* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
-       if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr->tags))
-               task->ata_task.fis.sector_count |= hdr->tags << 3;
-       else
-               hdr->tags = cpu_to_le32(tag);
-       hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
-       /*
-        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-        */
-
-       /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
-       buf_cmd = buf_tmp = slot->buf;
-       buf_tmp_dma = slot->buf_dma;
-
-       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
-       buf_tmp += MVS_ATA_CMD_SZ;
-       buf_tmp_dma += MVS_ATA_CMD_SZ;
-#if _MV_DUMP
-       slot->cmd_size = MVS_ATA_CMD_SZ;
-#endif
-
-       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-       /* used for STP.  unused for SATA? */
-       buf_oaf = buf_tmp;
-       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-       buf_tmp += MVS_OAF_SZ;
-       buf_tmp_dma += MVS_OAF_SZ;
-
-       /* region 3: PRD table ********************************************* */
-       buf_prd = buf_tmp;
-       if (tei->n_elem)
-               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-       else
-               hdr->prd_tbl = 0;
-
-       i = sizeof(struct mvs_prd) * tei->n_elem;
-       buf_tmp += i;
-       buf_tmp_dma += i;
-
-       /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
-       /* FIXME: probably unused, for SATA.  kept here just in case
-        * we get a STP/SATA error information record
-        */
-       slot->response = buf_tmp;
-       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-       req_len = sizeof(struct host_to_dev_fis);
-       resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
-           sizeof(struct mvs_err_info) - i;
-
-       /* request, response lengths */
-       resp_len = min(resp_len, max_resp_len);
-       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
-       task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
-       /* fill in command FIS and ATAPI CDB */
-       memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
-       if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
-               memcpy(buf_cmd + STP_ATAPI_CMD,
-                       task->ata_task.atapi_packet, 16);
-
-       /* generate open address frame hdr (first 12 bytes) */
-       buf_oaf[0] = (1 << 7) | (2 << 4) | 0x1; /* initiator, STP, ftype 1h */
-       buf_oaf[1] = task->dev->linkrate & 0xf;
-       *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
-       memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-       /* fill in PRD (scatter/gather) table, if any */
-       for_each_sg(task->scatter, sg, tei->n_elem, i) {
-               buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-               buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-               buf_prd++;
-       }
-
-       return 0;
-}
-
-static int mvs_task_prep_ssp(struct mvs_info *mvi,
-                            struct mvs_task_exec_info *tei)
-{
-       struct sas_task *task = tei->task;
-       struct mvs_cmd_hdr *hdr = tei->hdr;
-       struct mvs_port *port = tei->port;
-       struct mvs_slot_info *slot;
-       struct scatterlist *sg;
-       struct mvs_prd *buf_prd;
-       struct ssp_frame_hdr *ssp_hdr;
-       void *buf_tmp;
-       u8 *buf_cmd, *buf_oaf, fburst = 0;
-       dma_addr_t buf_tmp_dma;
-       u32 flags;
-       u32 resp_len, req_len, i, tag = tei->tag;
-       const u32 max_resp_len = SB_RFB_MAX;
-       u8 phy_mask;
-
-       slot = &mvi->slot_info[tag];
-
-       phy_mask = (port->wide_port_phymap) ? port->wide_port_phymap :
-               task->dev->port->phy_mask;
-       slot->tx = mvi->tx_prod;
-       mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
-                               (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
-                               (phy_mask << TXQ_PHY_SHIFT));
-
-       flags = MCH_RETRY;
-       if (task->ssp_task.enable_first_burst) {
-               flags |= MCH_FBURST;
-               fburst = (1 << 7);
-       }
-       hdr->flags = cpu_to_le32(flags |
-                                (tei->n_elem << MCH_PRD_LEN_SHIFT) |
-                                (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT));
-
-       hdr->tags = cpu_to_le32(tag);
-       hdr->data_len = cpu_to_le32(task->total_xfer_len);
-
-       /*
-        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
-        */
-
-       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
-       buf_cmd = buf_tmp = slot->buf;
-       buf_tmp_dma = slot->buf_dma;
-
-       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
-
-       buf_tmp += MVS_SSP_CMD_SZ;
-       buf_tmp_dma += MVS_SSP_CMD_SZ;
-#if _MV_DUMP
-       slot->cmd_size = MVS_SSP_CMD_SZ;
-#endif
-
-       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
-       buf_oaf = buf_tmp;
-       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
-
-       buf_tmp += MVS_OAF_SZ;
-       buf_tmp_dma += MVS_OAF_SZ;
-
-       /* region 3: PRD table ********************************************* */
-       buf_prd = buf_tmp;
-       if (tei->n_elem)
-               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
-       else
-               hdr->prd_tbl = 0;
-
-       i = sizeof(struct mvs_prd) * tei->n_elem;
-       buf_tmp += i;
-       buf_tmp_dma += i;
-
-       /* region 4: status buffer (the larger the PRD table, the smaller this buffer) */
-       slot->response = buf_tmp;
-       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
-
-       resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
-           sizeof(struct mvs_err_info) - i;
-       resp_len = min(resp_len, max_resp_len);
-
-       req_len = sizeof(struct ssp_frame_hdr) + 28;
-
-       /* request, response lengths */
-       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
-
-       /* generate open address frame hdr (first 12 bytes) */
-       buf_oaf[0] = (1 << 7) | (1 << 4) | 0x1; /* initiator, SSP, ftype 1h */
-       buf_oaf[1] = task->dev->linkrate & 0xf;
-       *(u16 *)(buf_oaf + 2) = cpu_to_be16(tag);
-       memcpy(buf_oaf + 4, task->dev->sas_addr, SAS_ADDR_SIZE);
-
-       /* fill in SSP frame header (Command Table.SSP frame header) */
-       ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
-       ssp_hdr->frame_type = SSP_COMMAND;
-       memcpy(ssp_hdr->hashed_dest_addr, task->dev->hashed_sas_addr,
-              HASHED_SAS_ADDR_SIZE);
-       memcpy(ssp_hdr->hashed_src_addr,
-              task->dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
-       ssp_hdr->tag = cpu_to_be16(tag);
-
-       /* fill in command frame IU */
-       buf_cmd += sizeof(*ssp_hdr);
-       memcpy(buf_cmd, &task->ssp_task.LUN, 8);
-       buf_cmd[9] = fburst | task->ssp_task.task_attr |
-                       (task->ssp_task.task_prio << 3);
-       memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
-
-       /* fill in PRD (scatter/gather) table, if any */
-       for_each_sg(task->scatter, sg, tei->n_elem, i) {
-               buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
-               buf_prd->len = cpu_to_le32(sg_dma_len(sg));
-               buf_prd++;
-       }
-
-       return 0;
-}
-
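-/* Queue one or more sas_tasks: DMA-map the data, allocate a slot and tag,
- * build the protocol-specific command header, then bump the TX producer
- * index once for the whole batch.
- */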
-static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags)
-{
-       struct domain_device *dev = task->dev;
-       struct mvs_info *mvi = dev->port->ha->lldd_ha;
-       struct pci_dev *pdev = mvi->pdev;
-       void __iomem *regs = mvi->regs;
-       struct mvs_task_exec_info tei;
-       struct sas_task *t = task;
-       struct mvs_slot_info *slot;
-       u32 tag = 0xdeadbeef, rc, n_elem = 0;
-       unsigned long flags;
-       u32 n = num, pass = 0;
-
-       spin_lock_irqsave(&mvi->lock, flags);
-       do {
-               dev = t->dev;
-               tei.port = &mvi->port[dev->port->id];
-
-               if (!tei.port->port_attached) {
-                       if (sas_protocol_ata(t->task_proto)) {
-                               rc = SAS_PHY_DOWN;
-                               goto out_done;
-                       } else {
-                               struct task_status_struct *ts = &t->task_status;
-                               ts->resp = SAS_TASK_UNDELIVERED;
-                               ts->stat = SAS_PHY_DOWN;
-                               t->task_done(t);
-                               if (n > 1)
-                                       t = list_entry(t->list.next,
-                                                       struct sas_task, list);
-                               continue;
-                       }
-               }
-
-               if (!sas_protocol_ata(t->task_proto)) {
-                       if (t->num_scatter) {
-                               n_elem = pci_map_sg(mvi->pdev, t->scatter,
-                                                   t->num_scatter,
-                                                   t->data_dir);
-                               if (!n_elem) {
-                                       rc = -ENOMEM;
-                                       goto err_out;
-                               }
-                       }
-               } else {
-                       n_elem = t->num_scatter;
-               }
-
-               rc = mvs_tag_alloc(mvi, &tag);
-               if (rc)
-                       goto err_out;
-
-               slot = &mvi->slot_info[tag];
-               t->lldd_task = NULL;
-               slot->n_elem = n_elem;
-               memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-               tei.task = t;
-               tei.hdr = &mvi->slot[tag];
-               tei.tag = tag;
-               tei.n_elem = n_elem;
-
-               switch (t->task_proto) {
-               case SAS_PROTOCOL_SMP:
-                       rc = mvs_task_prep_smp(mvi, &tei);
-                       break;
-               case SAS_PROTOCOL_SSP:
-                       rc = mvs_task_prep_ssp(mvi, &tei);
-                       break;
-               case SAS_PROTOCOL_SATA:
-               case SAS_PROTOCOL_STP:
-               case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
-                       rc = mvs_task_prep_ata(mvi, &tei);
-                       break;
-               default:
-                       dev_printk(KERN_ERR, &pdev->dev,
-                               "unknown sas_task proto: 0x%x\n",
-                               t->task_proto);
-                       rc = -EINVAL;
-                       break;
-               }
-
-               if (rc)
-                       goto err_out_tag;
-
-               slot->task = t;
-               slot->port = tei.port;
-               t->lldd_task = (void *) slot;
-               list_add_tail(&slot->list, &slot->port->list);
-               /* TODO: select normal or high priority */
-
-               spin_lock(&t->task_state_lock);
-               t->task_state_flags |= SAS_TASK_AT_INITIATOR;
-               spin_unlock(&t->task_state_lock);
-
-               mvs_hba_memory_dump(mvi, tag, t->task_proto);
-
-               ++pass;
-               mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
-               if (n > 1)
-                       t = list_entry(t->list.next, struct sas_task, list);
-       } while (--n);
-
-       rc = 0;
-       goto out_done;
-
-err_out_tag:
-       mvs_tag_free(mvi, tag);
-err_out:
-       dev_printk(KERN_ERR, &pdev->dev, "mvsas exec failed[%d]!\n", rc);
-       if (!sas_protocol_ata(t->task_proto))
-               if (n_elem)
-                       pci_unmap_sg(mvi->pdev, t->scatter, n_elem,
-                                    t->data_dir);
-out_done:
-       if (pass)
-               mw32(TX_PROD_IDX, (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
-       spin_unlock_irqrestore(&mvi->lock, flags);
-       return rc;
-}
-
-static int mvs_task_abort(struct sas_task *task)
-{
-       int rc;
-       unsigned long flags;
-       struct mvs_info *mvi = task->dev->port->ha->lldd_ha;
-       struct pci_dev *pdev = mvi->pdev;
-       int tag;
-
-       spin_lock_irqsave(&task->task_state_lock, flags);
-       if (task->task_state_flags & SAS_TASK_STATE_DONE) {
-               rc = TMF_RESP_FUNC_COMPLETE;
-               spin_unlock_irqrestore(&task->task_state_lock, flags);
-               goto out_done;
-       }
-       spin_unlock_irqrestore(&task->task_state_lock, flags);
-
-       switch (task->task_proto) {
-       case SAS_PROTOCOL_SMP:
-               dev_printk(KERN_DEBUG, &pdev->dev, "SMP Abort!\n");
-               break;
-       case SAS_PROTOCOL_SSP:
-               dev_printk(KERN_DEBUG, &pdev->dev, "SSP Abort!\n");
-               break;
-       case SAS_PROTOCOL_SATA:
-       case SAS_PROTOCOL_STP:
-       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
-               dev_printk(KERN_DEBUG, &pdev->dev, "STP Abort!\n");
-#if _MV_DUMP
-               /* ata_task.fis holds the outgoing H2D (command) FIS */
-               dev_printk(KERN_DEBUG, &pdev->dev, "Dump H2D FIS:\n");
-               mvs_hexdump(sizeof(struct host_to_dev_fis),
-                               (void *)&task->ata_task.fis, 0);
-               dev_printk(KERN_DEBUG, &pdev->dev, "Dump ATAPI Cmd:\n");
-               mvs_hexdump(16, task->ata_task.atapi_packet, 0);
-#endif
-               spin_lock_irqsave(&task->task_state_lock, flags);
-               if (task->task_state_flags & SAS_TASK_NEED_DEV_RESET) {
-                       /* TODO */
-                       ;
-               }
-               spin_unlock_irqrestore(&task->task_state_lock, flags);
-               break;
-       }
-       default:
-               break;
-       }
-
-       if (mvs_find_tag(mvi, task, &tag)) {
-               spin_lock_irqsave(&mvi->lock, flags);
-               mvs_slot_task_free(mvi, task, &mvi->slot_info[tag], tag);
-               spin_unlock_irqrestore(&mvi->lock, flags);
-       }
-       if (!mvs_task_exec(task, 1, GFP_ATOMIC))
-               rc = TMF_RESP_FUNC_COMPLETE;
-       else
-               rc = TMF_RESP_FUNC_FAILED;
-out_done:
-       return rc;
-}
-
-static void mvs_free(struct mvs_info *mvi)
-{
-       int i;
-
-       if (!mvi)
-               return;
-
-       for (i = 0; i < MVS_SLOTS; i++) {
-               struct mvs_slot_info *slot = &mvi->slot_info[i];
-
-               if (slot->buf)
-                       dma_free_coherent(&mvi->pdev->dev, MVS_SLOT_BUF_SZ,
-                                         slot->buf, slot->buf_dma);
-       }
-
-       if (mvi->tx)
-               dma_free_coherent(&mvi->pdev->dev,
-                                 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
-                                 mvi->tx, mvi->tx_dma);
-       if (mvi->rx_fis)
-               dma_free_coherent(&mvi->pdev->dev, MVS_RX_FISL_SZ,
-                                 mvi->rx_fis, mvi->rx_fis_dma);
-       if (mvi->rx)
-               dma_free_coherent(&mvi->pdev->dev,
-                                 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
-                                 mvi->rx, mvi->rx_dma);
-       if (mvi->slot)
-               dma_free_coherent(&mvi->pdev->dev,
-                                 sizeof(*mvi->slot) * MVS_SLOTS,
-                                 mvi->slot, mvi->slot_dma);
-#ifdef MVS_ENABLE_PERI
-       if (mvi->peri_regs)
-               iounmap(mvi->peri_regs);
-#endif
-       if (mvi->regs)
-               iounmap(mvi->regs);
-       if (mvi->shost)
-               scsi_host_put(mvi->shost);
-       kfree(mvi->sas.sas_port);
-       kfree(mvi->sas.sas_phy);
-       kfree(mvi);
-}
-
-/* FIXME: locking? */
-static int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
-                          void *funcdata)
-{
-       struct mvs_info *mvi = sas_phy->ha->lldd_ha;
-       int rc = 0, phy_id = sas_phy->id;
-       u32 tmp;
-
-       tmp = mvs_read_phy_ctl(mvi, phy_id);
-
-       switch (func) {
-       case PHY_FUNC_SET_LINK_RATE:{
-                       struct sas_phy_linkrates *rates = funcdata;
-                       u32 lrmin = 0, lrmax = 0;
-
-                       lrmin = (rates->minimum_linkrate << 8);
-                       lrmax = (rates->maximum_linkrate << 12);
-
-                       if (lrmin) {
-                               tmp &= ~(0xf << 8);
-                               tmp |= lrmin;
-                       }
-                       if (lrmax) {
-                               tmp &= ~(0xf << 12);
-                               tmp |= lrmax;
-                       }
-                       mvs_write_phy_ctl(mvi, phy_id, tmp);
-                       break;
-               }
-
-       case PHY_FUNC_HARD_RESET:
-               if (tmp & PHY_RST_HARD)
-                       break;
-               mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST_HARD);
-               break;
-
-       case PHY_FUNC_LINK_RESET:
-               mvs_write_phy_ctl(mvi, phy_id, tmp | PHY_RST);
-               break;
-
-       case PHY_FUNC_DISABLE:
-       case PHY_FUNC_RELEASE_SPINUP_HOLD:
-       default:
-               rc = -EOPNOTSUPP;
-       }
-
-       return rc;
-}
-
-static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
-{
-       struct mvs_phy *phy = &mvi->phy[phy_id];
-       struct asd_sas_phy *sas_phy = &phy->sas_phy;
-
-       sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
-       sas_phy->class = SAS;
-       sas_phy->iproto = SAS_PROTOCOL_ALL;
-       sas_phy->tproto = 0;
-       sas_phy->type = PHY_TYPE_PHYSICAL;
-       sas_phy->role = PHY_ROLE_INITIATOR;
-       sas_phy->oob_mode = OOB_NOT_CONNECTED;
-       sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
-
-       sas_phy->id = phy_id;
-       sas_phy->sas_addr = &mvi->sas_addr[0];
-       sas_phy->frame_rcvd = &phy->frame_rcvd[0];
-       sas_phy->ha = &mvi->sas;
-       sas_phy->lldd_phy = phy;
-}
-
-static struct mvs_info *__devinit mvs_alloc(struct pci_dev *pdev,
-                                           const struct pci_device_id *ent)
-{
-       struct mvs_info *mvi;
-       unsigned long res_start, res_len, res_flag;
-       struct asd_sas_phy **arr_phy;
-       struct asd_sas_port **arr_port;
-       const struct mvs_chip_info *chip = &mvs_chips[ent->driver_data];
-       int i;
-
-       /*
-        * alloc and init our per-HBA mvs_info struct
-        */
-
-       mvi = kzalloc(sizeof(*mvi), GFP_KERNEL);
-       if (!mvi)
-               return NULL;
-
-       spin_lock_init(&mvi->lock);
-#ifdef MVS_USE_TASKLET
-       tasklet_init(&mvi->tasklet, mvs_tasklet, (unsigned long)mvi);
-#endif
-       mvi->pdev = pdev;
-       mvi->chip = chip;
-
-       if (pdev->device == 0x6440 && pdev->revision == 0)
-               mvi->flags |= MVF_PHY_PWR_FIX;
-
-       /*
-        * alloc and init SCSI, SAS glue
-        */
-
-       mvi->shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
-       if (!mvi->shost)
-               goto err_out;
-
-       arr_phy = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
-       arr_port = kcalloc(MVS_MAX_PHYS, sizeof(void *), GFP_KERNEL);
-       if (!arr_phy || !arr_port)
-               goto err_out;
-
-       for (i = 0; i < MVS_MAX_PHYS; i++) {
-               mvs_phy_init(mvi, i);
-               arr_phy[i] = &mvi->phy[i].sas_phy;
-               arr_port[i] = &mvi->port[i].sas_port;
-               mvi->port[i].taskfileset = MVS_ID_NOT_MAPPED;
-               mvi->port[i].wide_port_phymap = 0;
-               mvi->port[i].port_attached = 0;
-               INIT_LIST_HEAD(&mvi->port[i].list);
-       }
-
-       SHOST_TO_SAS_HA(mvi->shost) = &mvi->sas;
-       mvi->shost->transportt = mvs_stt;
-       mvi->shost->max_id = 21;
-       mvi->shost->max_lun = ~0;
-       mvi->shost->max_channel = 0;
-       mvi->shost->max_cmd_len = 16;
-
-       mvi->sas.sas_ha_name = DRV_NAME;
-       mvi->sas.dev = &pdev->dev;
-       mvi->sas.lldd_module = THIS_MODULE;
-       mvi->sas.sas_addr = &mvi->sas_addr[0];
-       mvi->sas.sas_phy = arr_phy;
-       mvi->sas.sas_port = arr_port;
-       mvi->sas.num_phys = chip->n_phy;
-       mvi->sas.lldd_max_execute_num = 1;
-       mvi->sas.lldd_queue_size = MVS_QUEUE_SIZE;
-       mvi->shost->can_queue = MVS_CAN_QUEUE;
-       mvi->shost->cmd_per_lun = MVS_SLOTS / mvi->sas.num_phys;
-       mvi->sas.lldd_ha = mvi;
-       mvi->sas.core.shost = mvi->shost;
-
-       mvs_tag_init(mvi);
-
-       /*
-        * ioremap main and peripheral registers
-        */
-
-#ifdef MVS_ENABLE_PERI
-       res_start = pci_resource_start(pdev, 2);
-       res_len = pci_resource_len(pdev, 2);
-       if (!res_start || !res_len)
-               goto err_out;
-
-       mvi->peri_regs = ioremap_nocache(res_start, res_len);
-       if (!mvi->peri_regs)
-               goto err_out;
-#endif
-
-       res_start = pci_resource_start(pdev, 4);
-       res_len = pci_resource_len(pdev, 4);
-       if (!res_start || !res_len)
-               goto err_out;
-
-       res_flag = pci_resource_flags(pdev, 4);
-       if (res_flag & IORESOURCE_CACHEABLE)
-               mvi->regs = ioremap(res_start, res_len);
-       else
-               mvi->regs = ioremap_nocache(res_start, res_len);
-
-       if (!mvi->regs)
-               goto err_out;
-
-       /*
-        * alloc and init our DMA areas
-        */
-
-       mvi->tx = dma_alloc_coherent(&pdev->dev,
-                                    sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
-                                    &mvi->tx_dma, GFP_KERNEL);
-       if (!mvi->tx)
-               goto err_out;
-       memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
-
-       mvi->rx_fis = dma_alloc_coherent(&pdev->dev, MVS_RX_FISL_SZ,
-                                        &mvi->rx_fis_dma, GFP_KERNEL);
-       if (!mvi->rx_fis)
-               goto err_out;
-       memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
-
-       mvi->rx = dma_alloc_coherent(&pdev->dev,
-                                    sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
-                                    &mvi->rx_dma, GFP_KERNEL);
-       if (!mvi->rx)
-               goto err_out;
-       memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
-
-       mvi->rx[0] = cpu_to_le32(0xfff);
-       mvi->rx_cons = 0xfff;
-
-       mvi->slot = dma_alloc_coherent(&pdev->dev,
-                                      sizeof(*mvi->slot) * MVS_SLOTS,
-                                      &mvi->slot_dma, GFP_KERNEL);
-       if (!mvi->slot)
-               goto err_out;
-       memset(mvi->slot, 0, sizeof(*mvi->slot) * MVS_SLOTS);
-
-       for (i = 0; i < MVS_SLOTS; i++) {
-               struct mvs_slot_info *slot = &mvi->slot_info[i];
-
-               slot->buf = dma_alloc_coherent(&pdev->dev, MVS_SLOT_BUF_SZ,
-                                              &slot->buf_dma, GFP_KERNEL);
-               if (!slot->buf)
-                       goto err_out;
-               memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
-       }
-
-       /* finally, read NVRAM to get our SAS address */
-       if (mvs_nvram_read(mvi, NVR_SAS_ADDR, &mvi->sas_addr, 8))
-               goto err_out;
-       return mvi;
-
-err_out:
-       mvs_free(mvi);
-       return NULL;
-}
-
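-/* Indirect register access: latch the target address into CMD_ADDR, then
- * read or write CMD_DATA.
- */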
-static u32 mvs_cr32(void __iomem *regs, u32 addr)
-{
-       mw32(CMD_ADDR, addr);
-       return mr32(CMD_DATA);
-}
-
-static void mvs_cw32(void __iomem *regs, u32 addr, u32 val)
-{
-       mw32(CMD_ADDR, addr);
-       mw32(CMD_DATA, val);
-}
-
-static u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
-{
-       void __iomem *regs = mvi->regs;
-       return (port < 4) ? mr32(P0_SER_CTLSTAT + port * 4) :
-               mr32(P4_SER_CTLSTAT + (port - 4) * 4);
-}
-
-static void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
-{
-       void __iomem *regs = mvi->regs;
-       if (port < 4)
-               mw32(P0_SER_CTLSTAT + port * 4, val);
-       else
-               mw32(P4_SER_CTLSTAT + (port - 4) * 4, val);
-}
-
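-/* Per-port registers are banked: ports 0-3 are reached via off, ports 4
- * and up via off2, with an 8-byte stride per port.
- */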
-static u32 mvs_read_port(struct mvs_info *mvi, u32 off, u32 off2, u32 port)
-{
-       void __iomem *regs = mvi->regs + off;
-       void __iomem *regs2 = mvi->regs + off2;
-       return (port < 4) ? readl(regs + port * 8) :
-               readl(regs2 + (port - 4) * 8);
-}
-
-static void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
-                               u32 port, u32 val)
-{
-       void __iomem *regs = mvi->regs + off;
-       void __iomem *regs2 = mvi->regs + off2;
-       if (port < 4)
-               writel(val, regs + port * 8);
-       else
-               writel(val, regs2 + (port - 4) * 8);
-}
-
-static u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
-{
-       return mvs_read_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port);
-}
-
-static void mvs_write_port_cfg_data(struct mvs_info *mvi, u32 port, u32 val)
-{
-       mvs_write_port(mvi, MVS_P0_CFG_DATA, MVS_P4_CFG_DATA, port, val);
-}
-
-static void mvs_write_port_cfg_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
-       mvs_write_port(mvi, MVS_P0_CFG_ADDR, MVS_P4_CFG_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
-{
-       return mvs_read_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port);
-}
-
-static void mvs_write_port_vsr_data(struct mvs_info *mvi, u32 port, u32 val)
-{
-       mvs_write_port(mvi, MVS_P0_VSR_DATA, MVS_P4_VSR_DATA, port, val);
-}
-
-static void mvs_write_port_vsr_addr(struct mvs_info *mvi, u32 port, u32 addr)
-{
-       mvs_write_port(mvi, MVS_P0_VSR_ADDR, MVS_P4_VSR_ADDR, port, addr);
-}
-
-static u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
-{
-       return mvs_read_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port);
-}
-
-static void mvs_write_port_irq_stat(struct mvs_info *mvi, u32 port, u32 val)
-{
-       mvs_write_port(mvi, MVS_P0_INT_STAT, MVS_P4_INT_STAT, port, val);
-}
-
-static u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
-{
-       return mvs_read_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port);
-}
-
-static void mvs_write_port_irq_mask(struct mvs_info *mvi, u32 port, u32 val)
-{
-       mvs_write_port(mvi, MVS_P0_INT_MASK, MVS_P4_INT_MASK, port, val);
-}
-
-static void __devinit mvs_phy_hacks(struct mvs_info *mvi)
-{
-       void __iomem *regs = mvi->regs;
-       u32 tmp;
-
-       /* workaround for SATA R-ERR, to ignore phy glitch */
-       tmp = mvs_cr32(regs, CMD_PHY_TIMER);
-       tmp &= ~(1 << 9);
-       tmp |= (1 << 10);
-       mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
-       /* enable retry 127 times */
-       mvs_cw32(regs, CMD_SAS_CTL1, 0x7f7f);
-
-       /* extend open frame timeout to max */
-       tmp = mvs_cr32(regs, CMD_SAS_CTL0);
-       tmp &= ~0xffff;
-       tmp |= 0x3fff;
-       mvs_cw32(regs, CMD_SAS_CTL0, tmp);
-
-       /* workaround for WDTIMEOUT: set to 550 ms */
-       mvs_cw32(regs, CMD_WD_TIMER, 0x86470);
-
-       /* don't halt on a different port's op during a wide-port link change */
-       mvs_cw32(regs, CMD_APP_ERR_CONFIG, 0xffefbf7d);
-
-       /* workaround for Seagate disks not being found: during the OOB
-        * sequence, receive COMINIT before sending out COMWAKE */
-       tmp = mvs_cr32(regs, CMD_PHY_MODE_21);
-       tmp &= 0x0000ffff;
-       tmp |= 0x00fa0000;
-       mvs_cw32(regs, CMD_PHY_MODE_21, tmp);
-
-       tmp = mvs_cr32(regs, CMD_PHY_TIMER);
-       tmp &= 0x1fffffff;
-       tmp |= (2U << 29);      /* 8 ms retry */
-       mvs_cw32(regs, CMD_PHY_TIMER, tmp);
-
-       /* TEST - for phy decoding error, adjust voltage levels */
-       mw32(P0_VSR_ADDR + 0, 0x8);
-       mw32(P0_VSR_DATA + 0, 0x2F0);
-
-       mw32(P0_VSR_ADDR + 8, 0x8);
-       mw32(P0_VSR_DATA + 8, 0x2F0);
-
-       mw32(P0_VSR_ADDR + 16, 0x8);
-       mw32(P0_VSR_DATA + 16, 0x2F0);
-
-       mw32(P0_VSR_ADDR + 24, 0x8);
-       mw32(P0_VSR_DATA + 24, 0x2F0);
-
-}
-
-static void mvs_enable_xmt(struct mvs_info *mvi, int PhyId)
-{
-       void __iomem *regs = mvi->regs;
-       u32 tmp;
-
-       tmp = mr32(PCS);
-       if (mvi->chip->n_phy <= 4)
-               tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT);
-       else
-               tmp |= 1 << (PhyId + PCS_EN_PORT_XMT_SHIFT2);
-       mw32(PCS, tmp);
-}
-
-static void mvs_detect_porttype(struct mvs_info *mvi, int i)
-{
-       void __iomem *regs = mvi->regs;
-       u32 reg;
-       struct mvs_phy *phy = &mvi->phy[i];
-
-       /* TODO check & save device type */
-       reg = mr32(GBL_PORT_TYPE);
-
-       if (reg & MODE_SAS_SATA & (1 << i))
-               phy->phy_type |= PORT_TYPE_SAS;
-       else
-               phy->phy_type |= PORT_TYPE_SATA;
-}
-
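-/* Read back the four dwords of the SATA signature FIS captured by the
- * phy.
- */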
-static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
-{
-       u32 *s = (u32 *) buf;
-
-       if (!s)
-               return NULL;
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
-       s[3] = mvs_read_port_cfg_data(mvi, i);
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
-       s[2] = mvs_read_port_cfg_data(mvi, i);
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
-       s[1] = mvs_read_port_cfg_data(mvi, i);
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
-       s[0] = mvs_read_port_cfg_data(mvi, i);
-
-       return (void *)s;
-}
-
-static u32 mvs_is_sig_fis_received(u32 irq_status)
-{
-       return irq_status & PHYEV_SIG_FIS;
-}
-
-static void mvs_update_wideport(struct mvs_info *mvi, int i)
-{
-       struct mvs_phy *phy = &mvi->phy[i];
-       struct mvs_port *port = phy->port;
-       int j, no;
-
-       for_each_phy(port->wide_port_phymap, no, j, mvi->chip->n_phy)
-               if (no & 1) {
-                       mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
-                       mvs_write_port_cfg_data(mvi, no,
-                                               port->wide_port_phymap);
-               } else {
-                       mvs_write_port_cfg_addr(mvi, no, PHYR_WIDE_PORT);
-                       mvs_write_port_cfg_data(mvi, no, 0);
-               }
-}
-
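-/* Return the phy control value if the phy is up; otherwise tear down any
- * port state attached to the phy and return 0.
- */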
-static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
-{
-       u32 tmp;
-       struct mvs_phy *phy = &mvi->phy[i];
-       struct mvs_port *port = phy->port;
-
-       tmp = mvs_read_phy_ctl(mvi, i);
-
-       if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
-               if (!port)
-                       phy->phy_attached = 1;
-               return tmp;
-       }
-
-       if (port) {
-               if (phy->phy_type & PORT_TYPE_SAS) {
-                       port->wide_port_phymap &= ~(1U << i);
-                       if (!port->wide_port_phymap)
-                               port->port_attached = 0;
-                       mvs_update_wideport(mvi, i);
-               } else if (phy->phy_type & PORT_TYPE_SATA)
-                       port->port_attached = 0;
-               mvs_free_reg_set(mvi, phy->port);
-               phy->port = NULL;
-               phy->phy_attached = 0;
-               phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
-       }
-       return 0;
-}
-
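-/* Refresh the cached identify/address/link-rate data for phy i; when
- * get_st is set, also sample and acknowledge the phy interrupt status.
- */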
-static void mvs_update_phyinfo(struct mvs_info *mvi, int i,
-                                       int get_st)
-{
-       struct mvs_phy *phy = &mvi->phy[i];
-       struct pci_dev *pdev = mvi->pdev;
-       u32 tmp;
-       u64 tmp64;
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
-       phy->dev_info = mvs_read_port_cfg_data(mvi, i);
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
-       phy->dev_sas_addr = (u64) mvs_read_port_cfg_data(mvi, i) << 32;
-
-       mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
-       phy->dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-
-       if (get_st) {
-               phy->irq_status = mvs_read_port_irq_stat(mvi, i);
-               phy->phy_status = mvs_is_phy_ready(mvi, i);
-       }
-
-       if (phy->phy_status) {
-               u32 phy_st;
-               struct asd_sas_phy *sas_phy = mvi->sas.sas_phy[i];
-
-               mvs_write_port_cfg_addr(mvi, i, PHYR_PHY_STAT);
-               phy_st = mvs_read_port_cfg_data(mvi, i);
-
-               sas_phy->linkrate =
-                       (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
-                               PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
-               phy->minimum_linkrate =
-                       (phy->phy_status &
-                               PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
-               phy->maximum_linkrate =
-                       (phy->phy_status &
-                               PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
-
-               if (phy->phy_type & PORT_TYPE_SAS) {
-                       /* update attached_sas_addr */
-                       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
-                       phy->att_dev_sas_addr =
-                               (u64) mvs_read_port_cfg_data(mvi, i) << 32;
-                       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
-                       phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
-                       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
-                       phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
-                       phy->identify.device_type =
-                           phy->att_dev_info & PORT_DEV_TYPE_MASK;
-
-                       if (phy->identify.device_type == SAS_END_DEV)
-                               phy->identify.target_port_protocols =
-                                                       SAS_PROTOCOL_SSP;
-                       else if (phy->identify.device_type != NO_DEVICE)
-                               phy->identify.target_port_protocols =
-                                                       SAS_PROTOCOL_SMP;
-                       if (phy_st & PHY_OOB_DTCTD)
-                               sas_phy->oob_mode = SAS_OOB_MODE;
-                       phy->frame_rcvd_size =
-                           sizeof(struct sas_identify_frame);
-               } else if (phy->phy_type & PORT_TYPE_SATA) {
-                       phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
-                       if (mvs_is_sig_fis_received(phy->irq_status)) {
-                               phy->att_dev_sas_addr = i;      /* temp */
-                               if (phy_st & PHY_OOB_DTCTD)
-                                       sas_phy->oob_mode = SATA_OOB_MODE;
-                               phy->frame_rcvd_size =
-                                   sizeof(struct dev_to_host_fis);
-                               mvs_get_d2h_reg(mvi, i,
-                                               (void *)sas_phy->frame_rcvd);
-                       } else {
-                               dev_printk(KERN_DEBUG, &pdev->dev,
-                                       "No sig fis\n");
-                               phy->phy_type &= ~(PORT_TYPE_SATA);
-                               goto out_done;
-                       }
-               }
-               tmp64 = cpu_to_be64(phy->att_dev_sas_addr);
-               memcpy(sas_phy->attached_sas_addr, &tmp64, SAS_ADDR_SIZE);
-
-               dev_printk(KERN_DEBUG, &pdev->dev,
-                       "phy[%d] Get Attached Address 0x%llX ,"
-                       " SAS Address 0x%llX\n",
-                       i,
-                       (unsigned long long)phy->att_dev_sas_addr,
-                       (unsigned long long)phy->dev_sas_addr);
-               dev_printk(KERN_DEBUG, &pdev->dev,
-                       "Rate = %x , type = %d\n",
-                       sas_phy->linkrate, phy->phy_type);
-
-               /* workaround for HW phy decoding error on 1.5g disk drive */
-               mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
-               tmp = mvs_read_port_vsr_data(mvi, i);
-               if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
-                    PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
-                       SAS_LINK_RATE_1_5_GBPS)
-                       tmp &= ~PHY_MODE6_LATECLK;
-               else
-                       tmp |= PHY_MODE6_LATECLK;
-               mvs_write_port_vsr_data(mvi, i, tmp);
-
-       }
-out_done:
-       if (get_st)
-               mvs_write_port_irq_stat(mvi, i, phy->irq_status);
-}
-
-static void mvs_port_formed(struct asd_sas_phy *sas_phy)
-{
-       struct sas_ha_struct *sas_ha = sas_phy->ha;
-       struct mvs_info *mvi = sas_ha->lldd_ha;
-       struct asd_sas_port *sas_port = sas_phy->port;
-       struct mvs_phy *phy = sas_phy->lldd_phy;
-       struct mvs_port *port = &mvi->port[sas_port->id];
-       unsigned long flags;
-
-       spin_lock_irqsave(&mvi->lock, flags);
-       port->port_attached = 1;
-       phy->port = port;
-       port->taskfileset = MVS_ID_NOT_MAPPED;
-       if (phy->phy_type & PORT_TYPE_SAS) {
-               port->wide_port_phymap = sas_port->phy_mask;
-               mvs_update_wideport(mvi, sas_phy->id);
-       }
-       spin_unlock_irqrestore(&mvi->lock, flags);
-}
-
-static int mvs_I_T_nexus_reset(struct domain_device *dev)
-{
-       return TMF_RESP_FUNC_FAILED;
-}
-
-static int __devinit mvs_hw_init(struct mvs_info *mvi)
-{
-       void __iomem *regs = mvi->regs;
-       int i;
-       u32 tmp, cctl;
-
-       /* make sure interrupts are masked immediately (paranoia) */
-       mw32(GBL_CTL, 0);
-       tmp = mr32(GBL_CTL);
-
-       /* Reset Controller */
-       if (!(tmp & HBA_RST)) {
-               if (mvi->flags & MVF_PHY_PWR_FIX) {
-                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
-                       tmp &= ~PCTL_PWR_ON;
-                       tmp |= PCTL_OFF;
-                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
-                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
-                       tmp &= ~PCTL_PWR_ON;
-                       tmp |= PCTL_OFF;
-                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-               }
-
-               /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
-               mw32_f(GBL_CTL, HBA_RST);
-       }
-
-       /* wait for reset to finish; timeout is just a guess */
-       i = 1000;
-       while (i-- > 0) {
-               msleep(10);
-
-               if (!(mr32(GBL_CTL) & HBA_RST))
-                       break;
-       }
-       if (mr32(GBL_CTL) & HBA_RST) {
-               dev_printk(KERN_ERR, &mvi->pdev->dev, "HBA reset failed\n");
-               return -EBUSY;
-       }
-
-       /* Init Chip */
-       /* make sure RST is set; HBA_RST /should/ have done that for us */
-       cctl = mr32(CTL);
-       if (cctl & CCTL_RST)
-               cctl &= ~CCTL_RST;
-       else
-               mw32_f(CTL, cctl | CCTL_RST);
-
-       /* write to device control _AND_ device status register? - A.C. */
-       pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
-       tmp &= ~PRD_REQ_MASK;
-       tmp |= PRD_REQ_SIZE;
-       pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
-
-       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
-       tmp |= PCTL_PWR_ON;
-       tmp &= ~PCTL_OFF;
-       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
-
-       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
-       tmp |= PCTL_PWR_ON;
-       tmp &= ~PCTL_OFF;
-       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
-
-       mw32_f(CTL, cctl);
-
-       /* reset control */
-       mw32(PCS, 0);           /* MVS_PCS */
-
-       mvs_phy_hacks(mvi);
-
-       mw32(CMD_LIST_LO, mvi->slot_dma);
-       mw32(CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
-
-       mw32(RX_FIS_LO, mvi->rx_fis_dma);
-       mw32(RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
-
-       mw32(TX_CFG, MVS_CHIP_SLOT_SZ);
-       mw32(TX_LO, mvi->tx_dma);
-       mw32(TX_HI, (mvi->tx_dma >> 16) >> 16);
-
-       mw32(RX_CFG, MVS_RX_RING_SZ);
-       mw32(RX_LO, mvi->rx_dma);
-       mw32(RX_HI, (mvi->rx_dma >> 16) >> 16);
-
-       /* enable auto port detection */
-       mw32(GBL_PORT_TYPE, MODE_AUTO_DET_EN);
-       msleep(1100);
-       /* init and reset phys */
-       for (i = 0; i < mvi->chip->n_phy; i++) {
-               u32 lo = be32_to_cpu(*(u32 *)&mvi->sas_addr[4]);
-               u32 hi = be32_to_cpu(*(u32 *)&mvi->sas_addr[0]);
-
-               mvs_detect_porttype(mvi, i);
-
-               /* set phy local SAS address */
-               mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_LO);
-               mvs_write_port_cfg_data(mvi, i, lo);
-               mvs_write_port_cfg_addr(mvi, i, PHYR_ADDR_HI);
-               mvs_write_port_cfg_data(mvi, i, hi);
-
-               /* reset phy */
-               tmp = mvs_read_phy_ctl(mvi, i);
-               tmp |= PHY_RST;
-               mvs_write_phy_ctl(mvi, i, tmp);
-       }
-
-       msleep(100);
-
-       for (i = 0; i < mvi->chip->n_phy; i++) {
-               /* clear phy int status */
-               tmp = mvs_read_port_irq_stat(mvi, i);
-               tmp &= ~PHYEV_SIG_FIS;
-               mvs_write_port_irq_stat(mvi, i, tmp);
-
-               /* set phy int mask */
-               tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
-                       PHYEV_ID_DONE | PHYEV_DEC_ERR;
-               mvs_write_port_irq_mask(mvi, i, tmp);
-
-               msleep(100);
-               mvs_update_phyinfo(mvi, i, 1);
-               mvs_enable_xmt(mvi, i);
-       }
-
-       /* FIXME: update wide port bitmaps */
-
-       /* little endian for open address and command table, etc. */
-       /* A.C.
-        * it seems that (from the spec) turning on big-endian won't
-        * do us any good on big-endian machines; needs further confirmation
-        */
-       cctl = mr32(CTL);
-       cctl |= CCTL_ENDIAN_CMD;
-       cctl |= CCTL_ENDIAN_DATA;
-       cctl &= ~CCTL_ENDIAN_OPEN;
-       cctl |= CCTL_ENDIAN_RSP;
-       mw32_f(CTL, cctl);
-
-       /* reset CMD queue */
-       tmp = mr32(PCS);
-       tmp |= PCS_CMD_RST;
-       mw32(PCS, tmp);
-       /* Interrupt coalescing can cause HW interrupts to be missed in
-        * some cases: the max coalescing count is 0x1ff while our max slot
-        * count is 0x200, which would make the count wrap to 0, so disable
-        * coalescing here.
-        */
-       tmp = 0;
-       mw32(INT_COAL, tmp);
-
-       tmp = 0x100;
-       mw32(INT_COAL_TMOUT, tmp);
-
-       /* ladies and gentlemen, start your engines */
-       mw32(TX_CFG, 0);
-       mw32(TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
-       mw32(RX_CFG, MVS_RX_RING_SZ | RX_EN);
-       /* enable CMD/CMPL_Q/RESP mode */
-       mw32(PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN | PCS_CMD_EN);
-
-       /* enable completion queue interrupt */
-       tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS);
-       mw32(INT_MASK, tmp);
-
-       /* Enable SRS interrupt */
-       mw32(INT_MASK_SRS, 0xFF);
-       return 0;
-}
-
-static void __devinit mvs_print_info(struct mvs_info *mvi)
-{
-       struct pci_dev *pdev = mvi->pdev;
-       static int printed_version;
-
-       if (!printed_version++)
-               dev_printk(KERN_INFO, &pdev->dev, "version " DRV_VERSION "\n");
-
-       dev_printk(KERN_INFO, &pdev->dev, "%u phys, addr %llx\n",
-                  mvi->chip->n_phy, SAS_ADDR(mvi->sas_addr));
-}
-
-static int __devinit mvs_pci_init(struct pci_dev *pdev,
-                                 const struct pci_device_id *ent)
-{
-       int rc;
-       struct mvs_info *mvi;
-       irq_handler_t irq_handler = mvs_interrupt;
-
-       rc = pci_enable_device(pdev);
-       if (rc)
-               return rc;
-
-       pci_set_master(pdev);
-
-       rc = pci_request_regions(pdev, DRV_NAME);
-       if (rc)
-               goto err_out_disable;
-
-       rc = pci_go_64(pdev);
-       if (rc)
-               goto err_out_regions;
-
-       mvi = mvs_alloc(pdev, ent);
-       if (!mvi) {
-               rc = -ENOMEM;
-               goto err_out_regions;
-       }
-
-       rc = mvs_hw_init(mvi);
-       if (rc)
-               goto err_out_mvi;
-
-#ifndef MVS_DISABLE_MSI
-       if (!pci_enable_msi(pdev)) {
-               u32 tmp;
-               void __iomem *regs = mvi->regs;
-               mvi->flags |= MVF_MSI;
-               irq_handler = mvs_msi_interrupt;
-               tmp = mr32(PCS);
-               mw32(PCS, tmp | PCS_SELF_CLEAR);
-       }
-#endif
-
-       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED, DRV_NAME, mvi);
-       if (rc)
-               goto err_out_msi;
-
-       rc = scsi_add_host(mvi->shost, &pdev->dev);
-       if (rc)
-               goto err_out_irq;
-
-       rc = sas_register_ha(&mvi->sas);
-       if (rc)
-               goto err_out_shost;
-
-       pci_set_drvdata(pdev, mvi);
-
-       mvs_print_info(mvi);
-
-       mvs_hba_interrupt_enable(mvi);
-
-       scsi_scan_host(mvi->shost);
-
-       return 0;
-
-err_out_shost:
-       scsi_remove_host(mvi->shost);
-err_out_irq:
-       free_irq(pdev->irq, mvi);
-err_out_msi:
-       if (mvi->flags & MVF_MSI)
-               pci_disable_msi(pdev);
-err_out_mvi:
-       mvs_free(mvi);
-err_out_regions:
-       pci_release_regions(pdev);
-err_out_disable:
-       pci_disable_device(pdev);
-       return rc;
-}
-
-static void __devexit mvs_pci_remove(struct pci_dev *pdev)
-{
-       struct mvs_info *mvi = pci_get_drvdata(pdev);
-
-       pci_set_drvdata(pdev, NULL);
-
-       if (mvi) {
-               sas_unregister_ha(&mvi->sas);
-               mvs_hba_interrupt_disable(mvi);
-               sas_remove_host(mvi->shost);
-               scsi_remove_host(mvi->shost);
-
-               free_irq(pdev->irq, mvi);
-               if (mvi->flags & MVF_MSI)
-                       pci_disable_msi(pdev);
-               mvs_free(mvi);
-               pci_release_regions(pdev);
-       }
-       pci_disable_device(pdev);
-}
-
-static struct sas_domain_function_template mvs_transport_ops = {
-       .lldd_execute_task      = mvs_task_exec,
-       .lldd_control_phy       = mvs_phy_control,
-       .lldd_abort_task        = mvs_task_abort,
-       .lldd_port_formed       = mvs_port_formed,
-       .lldd_I_T_nexus_reset   = mvs_I_T_nexus_reset,
-};
-
-static struct pci_device_id __devinitdata mvs_pci_table[] = {
-       { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
-       { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
-       {
-               .vendor         = PCI_VENDOR_ID_MARVELL,
-               .device         = 0x6440,
-               .subvendor      = PCI_ANY_ID,
-               .subdevice      = 0x6480,
-               .class          = 0,
-               .class_mask     = 0,
-               .driver_data    = chip_6480,
-       },
-       { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
-       { PCI_VDEVICE(MARVELL, 0x6480), chip_6480 },
-
-       { }     /* terminate list */
-};
-
-static struct pci_driver mvs_pci_driver = {
-       .name           = DRV_NAME,
-       .id_table       = mvs_pci_table,
-       .probe          = mvs_pci_init,
-       .remove         = __devexit_p(mvs_pci_remove),
-};
-
-static int __init mvs_init(void)
-{
-       int rc;
-
-       mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
-       if (!mvs_stt)
-               return -ENOMEM;
-
-       rc = pci_register_driver(&mvs_pci_driver);
-       if (rc)
-               goto err_out;
-
-       return 0;
-
-err_out:
-       sas_release_transport(mvs_stt);
-       return rc;
-}
-
-static void __exit mvs_exit(void)
-{
-       pci_unregister_driver(&mvs_pci_driver);
-       sas_release_transport(mvs_stt);
-}
-
-module_init(mvs_init);
-module_exit(mvs_exit);
-
-MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
-MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
-MODULE_VERSION(DRV_VERSION);
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, mvs_pci_table);
diff --git a/drivers/scsi/mvsas/Kconfig b/drivers/scsi/mvsas/Kconfig
new file mode 100644 (file)
index 0000000..6de7af2
--- /dev/null
@@ -0,0 +1,42 @@
+#
+# Kernel configuration file for 88SE64XX/88SE94XX SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This file is part of the 88SE64XX/88SE94XX driver.
+#
+# The 88SE64XX/88SE94XX driver is free software; you can redistribute
+# it and/or modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; version 2 of the
+# License.
+#
+# The 88SE64XX/88SE94XX driver is distributed in the hope that it will be
+# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with 88SE64XX/88SE94XX Driver; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
+#
+#
+
+config SCSI_MVSAS
+       tristate "Marvell 88SE64XX/88SE94XX SAS/SATA support"
+       depends on PCI
+       select SCSI_SAS_LIBSAS
+       select FW_LOADER
+       help
+               This driver supports Marvell's 3Gb/s PCI-E 88SE64XX and 6Gb/s
+               PCI-E 88SE94XX chip-based SAS/SATA host adapters.
+
+config SCSI_MVSAS_DEBUG
+       bool "Compile in debug mode"
+       default y
+       depends on SCSI_MVSAS
+       help
+               Compiles the 88SE64XX/88SE94XX driver in debug mode.  In debug
+               mode, the driver prints additional diagnostic messages to the
+               console.
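+
+# Note: when SCSI_MVSAS_DEBUG is enabled the Makefile below adds
+# -DMV_DEBUG to EXTRA_CFLAGS, which is what gates the driver's extra
+# console output.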
diff --git a/drivers/scsi/mvsas/Makefile b/drivers/scsi/mvsas/Makefile
new file mode 100644 (file)
index 0000000..52ac426
--- /dev/null
@@ -0,0 +1,32 @@
+#
+# Makefile for Marvell 88SE64xx/88SE94xx SAS/SATA driver.
+#
+# Copyright 2007 Red Hat, Inc.
+# Copyright 2008 Marvell. <kewei@marvell.com>
+#
+# This file is licensed under GPLv2.
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as
+# published by the Free Software Foundation; version 2 of the
+# License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+# USA
+
+ifeq ($(CONFIG_SCSI_MVSAS_DEBUG),y)
+       EXTRA_CFLAGS += -DMV_DEBUG
+endif
+
+obj-$(CONFIG_SCSI_MVSAS) += mvsas.o
+mvsas-y +=  mv_init.o  \
+           mv_sas.o   \
+           mv_64xx.o  \
+           mv_94xx.o
diff --git a/drivers/scsi/mvsas/mv_64xx.c b/drivers/scsi/mvsas/mv_64xx.c
new file mode 100644 (file)
index 0000000..10a5077
--- /dev/null
@@ -0,0 +1,793 @@
+/*
+ * Marvell 88SE64xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_64xx.h"
+#include "mv_chips.h"
+
+static void mvs_64xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+       void __iomem *regs = mvi->regs;
+       u32 reg;
+       struct mvs_phy *phy = &mvi->phy[i];
+
+       /* TODO check & save device type */
+       reg = mr32(MVS_GBL_PORT_TYPE);
+       phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       if (reg & MODE_SAS_SATA & (1 << i))
+               phy->phy_type |= PORT_TYPE_SAS;
+       else
+               phy->phy_type |= PORT_TYPE_SATA;
+}
+
+static void __devinit mvs_64xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
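+       /* the per-phy TX-enable bits sit at different offsets on the
+        * 4-phy and 8-phy variants, hence the two shift constants */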
+       tmp = mr32(MVS_PCS);
+       if (mvi->chip->n_phy <= 4)
+               tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT);
+       else
+               tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+       mw32(MVS_PCS, tmp);
+}
+
+static void __devinit mvs_64xx_phy_hacks(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+
+       mvs_phy_hacks(mvi);
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               /* TEST - for phy decoding error, adjust voltage levels */
+               mw32(MVS_P0_VSR_ADDR + 0, 0x8);
+               mw32(MVS_P0_VSR_DATA + 0, 0x2F0);
+
+               mw32(MVS_P0_VSR_ADDR + 8, 0x8);
+               mw32(MVS_P0_VSR_DATA + 8, 0x2F0);
+
+               mw32(MVS_P0_VSR_ADDR + 16, 0x8);
+               mw32(MVS_P0_VSR_DATA + 16, 0x2F0);
+
+               mw32(MVS_P0_VSR_ADDR + 24, 0x8);
+               mw32(MVS_P0_VSR_DATA + 24, 0x2F0);
+       } else {
+               int i;
+               /* disable auto port detection */
+               mw32(MVS_GBL_PORT_TYPE, 0);
+               for (i = 0; i < mvi->chip->n_phy; i++) {
+                       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE7);
+                       mvs_write_port_vsr_data(mvi, i, 0x90000000);
+                       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE9);
+                       mvs_write_port_vsr_data(mvi, i, 0x50f2);
+                       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE11);
+                       mvs_write_port_vsr_data(mvi, i, 0x0e);
+               }
+       }
+}
+
+static void mvs_64xx_stp_reset(struct mvs_info *mvi, u32 phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 reg, tmp;
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               if (phy_id < 4)
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &reg);
+               else
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &reg);
+
+       } else
+               reg = mr32(MVS_PHY_CTL);
+
+       tmp = reg;
+       if (phy_id < 4)
+               tmp |= (1U << phy_id) << PCTL_LINK_OFFS;
+       else
+               tmp |= (1U << (phy_id - 4)) << PCTL_LINK_OFFS;
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               if (phy_id < 4) {
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+                       mdelay(10);
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, reg);
+               } else {
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+                       mdelay(10);
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, reg);
+               }
+       } else {
+               mw32(MVS_PHY_CTL, tmp);
+               mdelay(10);
+               mw32(MVS_PHY_CTL, reg);
+       }
+}
+
+static void mvs_64xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+       u32 tmp;
+       tmp = mvs_read_port_irq_stat(mvi, phy_id);
+       tmp &= ~PHYEV_RDY_CH;
+       mvs_write_port_irq_stat(mvi, phy_id, tmp);
+       tmp = mvs_read_phy_ctl(mvi, phy_id);
+       if (hard)
+               tmp |= PHY_RST_HARD;
+       else
+               tmp |= PHY_RST;
+       mvs_write_phy_ctl(mvi, phy_id, tmp);
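+       /* a hard reset is self-clearing; poll until the hardware
+        * deasserts PHY_RST_HARD */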
+       if (hard) {
+               do {
+                       tmp = mvs_read_phy_ctl(mvi, phy_id);
+               } while (tmp & PHY_RST_HARD);
+       }
+}
+
+static int __devinit mvs_64xx_chip_reset(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       int i;
+
+       /* make sure interrupts are masked immediately (paranoia) */
+       mw32(MVS_GBL_CTL, 0);
+       tmp = mr32(MVS_GBL_CTL);
+
+       /* if not already in reset, apply the PHY power fix first */
+       if (!(tmp & HBA_RST)) {
+               if (mvi->flags & MVF_PHY_PWR_FIX) {
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+                       tmp &= ~PCTL_PWR_OFF;
+                       tmp |= PCTL_PHY_DSBL;
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+                       pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+                       tmp &= ~PCTL_PWR_OFF;
+                       tmp |= PCTL_PHY_DSBL;
+                       pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+               }
+       }
+
+       /* make sure interrupts are masked immediately (paranoia) */
+       mw32(MVS_GBL_CTL, 0);
+       tmp = mr32(MVS_GBL_CTL);
+
+       /* Reset Controller */
+       if (!(tmp & HBA_RST)) {
+               /* global reset, incl. COMRESET/H_RESET_N (self-clearing) */
+               mw32_f(MVS_GBL_CTL, HBA_RST);
+       }
+
+       /* wait for reset to finish; timeout is just a guess */
+       i = 1000;
+       while (i-- > 0) {
+               msleep(10);
+
+               if (!(mr32(MVS_GBL_CTL) & HBA_RST))
+                       break;
+       }
+       if (mr32(MVS_GBL_CTL) & HBA_RST) {
+               dev_printk(KERN_ERR, mvi->dev, "HBA reset failed\n");
+               return -EBUSY;
+       }
+       return 0;
+}
+
+static void mvs_64xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               u32 offs;
+               if (phy_id < 4)
+                       offs = PCR_PHY_CTL;
+               else {
+                       offs = PCR_PHY_CTL2;
+                       phy_id -= 4;
+               }
+               pci_read_config_dword(mvi->pdev, offs, &tmp);
+               tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+               pci_write_config_dword(mvi->pdev, offs, tmp);
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp |= 1U << (PCTL_PHY_DSBL_OFFS + phy_id);
+               mw32(MVS_PHY_CTL, tmp);
+       }
+}
+
+static void mvs_64xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               u32 offs;
+               if (phy_id < 4)
+                       offs = PCR_PHY_CTL;
+               else {
+                       offs = PCR_PHY_CTL2;
+                       phy_id -= 4;
+               }
+               pci_read_config_dword(mvi->pdev, offs, &tmp);
+               tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+               pci_write_config_dword(mvi->pdev, offs, tmp);
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~(1U << (PCTL_PHY_DSBL_OFFS + phy_id));
+               mw32(MVS_PHY_CTL, tmp);
+       }
+}
+
+static int __devinit mvs_64xx_init(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       int i;
+       u32 tmp, cctl;
+
+       if (mvi->pdev && mvi->pdev->revision == 0)
+               mvi->flags |= MVF_PHY_PWR_FIX;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               mvs_show_pcie_usage(mvi);
+               tmp = mvs_64xx_chip_reset(mvi);
+               if (tmp)
+                       return tmp;
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp |= PCTL_PHY_DSBL;
+               mw32(MVS_PHY_CTL, tmp);
+       }
+
+       /* Init Chip */
+       /* make sure RST is set; HBA_RST /should/ have done that for us */
+       cctl = mr32(MVS_CTL) & 0xFFFF;
+       if (cctl & CCTL_RST)
+               cctl &= ~CCTL_RST;
+       else
+               mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               /* write to device control _AND_ device status register */
+               pci_read_config_dword(mvi->pdev, PCR_DEV_CTRL, &tmp);
+               tmp &= ~PRD_REQ_MASK;
+               tmp |= PRD_REQ_SIZE;
+               pci_write_config_dword(mvi->pdev, PCR_DEV_CTRL, tmp);
+
+               pci_read_config_dword(mvi->pdev, PCR_PHY_CTL, &tmp);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp &= ~PCTL_PHY_DSBL;
+               pci_write_config_dword(mvi->pdev, PCR_PHY_CTL, tmp);
+
+               pci_read_config_dword(mvi->pdev, PCR_PHY_CTL2, &tmp);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp &= ~PCTL_PHY_DSBL;
+               pci_write_config_dword(mvi->pdev, PCR_PHY_CTL2, tmp);
+       } else {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp |= PCTL_COM_ON;
+               tmp &= ~PCTL_PHY_DSBL;
+               tmp |= PCTL_LINK_RST;
+               mw32(MVS_PHY_CTL, tmp);
+               msleep(100);
+               tmp &= ~PCTL_LINK_RST;
+               mw32(MVS_PHY_CTL, tmp);
+               msleep(100);
+       }
+
+       /* reset control */
+       mw32(MVS_PCS, 0);               /* MVS_PCS */
+       /* init phys */
+       mvs_64xx_phy_hacks(mvi);
+
+       /* enable auto port detection */
+       mw32(MVS_GBL_PORT_TYPE, MODE_AUTO_DET_EN);
+
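+       /*
+        * program the ring base addresses; the split ">> 16 >> 16" stores
+        * the high dword without shifting a 32-bit dma_addr_t by its full
+        * width, which would be undefined behaviour
+        */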
+       mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+       mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+       mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+       mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+       mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+       mw32(MVS_TX_LO, mvi->tx_dma);
+       mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+       mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+       mw32(MVS_RX_LO, mvi->rx_dma);
+       mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               /* set phy local SAS address */
+               /* the 64xx chip expects the SAS address in little-endian order */
+               mvs_set_sas_addr(mvi, i, PHYR_ADDR_LO, PHYR_ADDR_HI,
+                               cpu_to_be64(mvi->phy[i].dev_sas_addr));
+
+               mvs_64xx_enable_xmt(mvi, i);
+
+               mvs_64xx_phy_reset(mvi, i, 1);
+               msleep(500);
+               mvs_64xx_detect_porttype(mvi, i);
+       }
+       if (mvi->flags & MVF_FLAG_SOC) {
+               /* set select registers */
+               writel(0x0E008000, regs + 0x000);
+               writel(0x59000008, regs + 0x004);
+               writel(0x20, regs + 0x008);
+               writel(0x20, regs + 0x00c);
+               writel(0x20, regs + 0x010);
+               writel(0x20, regs + 0x014);
+               writel(0x20, regs + 0x018);
+               writel(0x20, regs + 0x01c);
+       }
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               /* clear phy int status */
+               tmp = mvs_read_port_irq_stat(mvi, i);
+               tmp &= ~PHYEV_SIG_FIS;
+               mvs_write_port_irq_stat(mvi, i, tmp);
+
+               /* set phy int mask */
+               tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH | PHYEV_UNASSOC_FIS |
+                       PHYEV_ID_DONE | PHYEV_DCDR_ERR | PHYEV_CRC_ERR |
+                       PHYEV_DEC_ERR;
+               mvs_write_port_irq_mask(mvi, i, tmp);
+
+               msleep(100);
+               mvs_update_phyinfo(mvi, i, 1);
+       }
+
+       /* FIXME: update wide port bitmaps */
+
+       /* little endian for open address and command table, etc. */
+       /*
+        * it seems (from the spec) that turning on big-endian won't
+        * do us any good on big-endian machines; needs further confirmation
+        */
+       cctl = mr32(MVS_CTL);
+       cctl |= CCTL_ENDIAN_CMD;
+       cctl |= CCTL_ENDIAN_DATA;
+       cctl &= ~CCTL_ENDIAN_OPEN;
+       cctl |= CCTL_ENDIAN_RSP;
+       mw32_f(MVS_CTL, cctl);
+
+       /* reset CMD queue */
+       tmp = mr32(MVS_PCS);
+       tmp |= PCS_CMD_RST;
+       mw32(MVS_PCS, tmp);
+       /* interrupt coalescing may cause missed HW interrupts in some cases;
+        * also, the max coalesce count is 0x1ff while our max slot count is
+        * 0x200, so disable coalescing (count = 0).
+        */
+       tmp = 0;
+       mw32(MVS_INT_COAL, tmp);
+
+       tmp = 0x100;
+       mw32(MVS_INT_COAL_TMOUT, tmp);
+
+       /* ladies and gentlemen, start your engines */
+       mw32(MVS_TX_CFG, 0);
+       mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+       mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+       /* enable CMD/CMPL_Q/RESP mode */
+       mw32(MVS_PCS, PCS_SATA_RETRY | PCS_FIS_RX_EN |
+               PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+       /* enable completion queue interrupt */
+       tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+               CINT_DMA_PCIE);
+
+       mw32(MVS_INT_MASK, tmp);
+
+       /* Enable SRS interrupt */
+       mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+       return 0;
+}
+
+static int mvs_64xx_ioremap(struct mvs_info *mvi)
+{
+       if (!mvs_ioremap(mvi, 4, 2))
+               return 0;
+       return -1;
+}
+
+static void mvs_64xx_iounmap(struct mvs_info *mvi)
+{
+       mvs_iounmap(mvi->regs);
+       mvs_iounmap(mvi->regs_ex);
+}
+
+static void mvs_64xx_interrupt_enable(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       tmp = mr32(MVS_GBL_CTL);
+       mw32(MVS_GBL_CTL, tmp | INT_EN);
+}
+
+static void mvs_64xx_interrupt_disable(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       tmp = mr32(MVS_GBL_CTL);
+       mw32(MVS_GBL_CTL, tmp & ~INT_EN);
+}
+
+static u32 mvs_64xx_isr_status(struct mvs_info *mvi, int irq)
+{
+       void __iomem *regs = mvi->regs;
+       u32 stat;
+
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               stat = mr32(MVS_GBL_INT_STAT);
+
+               if (stat == 0 || stat == 0xffffffff)
+                       return 0;
+       } else
+               stat = 1;
+       return stat;
+}
+
+static irqreturn_t mvs_64xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+       void __iomem *regs = mvi->regs;
+
+       /* clear CMD_CMPLT ASAP */
+       mw32_f(MVS_INT_STAT, CINT_DONE);
+#ifndef MVS_USE_TASKLET
+       spin_lock(&mvi->lock);
+#endif
+       mvs_int_full(mvi);
+#ifndef MVS_USE_TASKLET
+       spin_unlock(&mvi->lock);
+#endif
+       return IRQ_HANDLED;
+}
+
+static void mvs_64xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+       u32 tmp;
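+       /* set the slot's bits through the indirect command window
+        * (mvs_cw32) and busy-wait until the controller clears them */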
+       mvs_cw32(mvi, 0x40 + (slot_idx >> 3), 1 << (slot_idx % 32));
+       mvs_cw32(mvi, 0x00 + (slot_idx >> 3), 1 << (slot_idx % 32));
+       do {
+               tmp = mvs_cr32(mvi, 0x00 + (slot_idx >> 3));
+       } while (tmp & 1 << (slot_idx % 32));
+       do {
+               tmp = mvs_cr32(mvi, 0x40 + (slot_idx >> 3));
+       } while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_64xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+                               u32 tfs)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       if (type == PORT_TYPE_SATA) {
+               tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+               mw32(MVS_INT_STAT_SRS_0, tmp);
+       }
+       mw32(MVS_INT_STAT, CINT_CI_STOP);
+       tmp = mr32(MVS_PCS) | 0xFF00;
+       mw32(MVS_PCS, tmp);
+}
+
+static void mvs_64xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp, offs;
+
+       if (*tfs == MVS_ID_NOT_MAPPED)
+               return;
+
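+       /* register sets 0-15 are enabled through MVS_PCS, 16 and above
+        * through MVS_CTL */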
+       offs = 1U << ((*tfs & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+       if (*tfs < 16) {
+               tmp = mr32(MVS_PCS);
+               mw32(MVS_PCS, tmp & ~offs);
+       } else {
+               tmp = mr32(MVS_CTL);
+               mw32(MVS_CTL, tmp & ~offs);
+       }
+
+       tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << *tfs);
+       if (tmp)
+               mw32(MVS_INT_STAT_SRS_0, tmp);
+
+       *tfs = MVS_ID_NOT_MAPPED;
+       return;
+}
+
+static u8 mvs_64xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+       int i;
+       u32 tmp, offs;
+       void __iomem *regs = mvi->regs;
+
+       if (*tfs != MVS_ID_NOT_MAPPED)
+               return 0;
+
+       tmp = mr32(MVS_PCS);
+
+       for (i = 0; i < mvi->chip->srs_sz; i++) {
+               if (i == 16)
+                       tmp = mr32(MVS_CTL);
+               offs = 1U << ((i & 0x0f) + PCS_EN_SATA_REG_SHIFT);
+               if (!(tmp & offs)) {
+                       *tfs = i;
+
+                       if (i < 16)
+                               mw32(MVS_PCS, tmp | offs);
+                       else
+                               mw32(MVS_CTL, tmp | offs);
+                       tmp = mr32(MVS_INT_STAT_SRS_0) & (1U << i);
+                       if (tmp)
+                               mw32(MVS_INT_STAT_SRS_0, tmp);
+                       return 0;
+               }
+       }
+       return MVS_ID_NOT_MAPPED;
+}
+
+void mvs_64xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+       int i;
+       struct scatterlist *sg;
+       struct mvs_prd *buf_prd = prd;
+       for_each_sg(scatter, sg, nr, i) {
+               buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+               buf_prd->len = cpu_to_le32(sg_dma_len(sg));
+               buf_prd++;
+       }
+}
+
+static int mvs_64xx_oob_done(struct mvs_info *mvi, int i)
+{
+       u32 phy_st;
+       mvs_write_port_cfg_addr(mvi, i,
+                       PHYR_PHY_STAT);
+       phy_st = mvs_read_port_cfg_data(mvi, i);
+       if (phy_st & PHY_OOB_DTCTD)
+               return 1;
+       return 0;
+}
+
+static void mvs_64xx_fix_phy_info(struct mvs_info *mvi, int i,
+                               struct sas_identify_frame *id)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+       sas_phy->linkrate =
+               (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+                       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+
+       phy->minimum_linkrate =
+               (phy->phy_status &
+                       PHY_MIN_SPP_PHYS_LINK_RATE_MASK) >> 8;
+       phy->maximum_linkrate =
+               (phy->phy_status &
+                       PHY_MAX_SPP_PHYS_LINK_RATE_MASK) >> 12;
+
+       mvs_write_port_cfg_addr(mvi, i, PHYR_IDENTIFY);
+       phy->dev_info = mvs_read_port_cfg_data(mvi, i);
+
+       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_DEV_INFO);
+       phy->att_dev_info = mvs_read_port_cfg_data(mvi, i);
+
+       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_HI);
+       phy->att_dev_sas_addr =
+            (u64) mvs_read_port_cfg_data(mvi, i) << 32;
+       mvs_write_port_cfg_addr(mvi, i, PHYR_ATT_ADDR_LO);
+       phy->att_dev_sas_addr |= mvs_read_port_cfg_data(mvi, i);
+       phy->att_dev_sas_addr = SAS_ADDR(&phy->att_dev_sas_addr);
+}
+
+static void mvs_64xx_phy_work_around(struct mvs_info *mvi, int i)
+{
+       u32 tmp;
+       struct mvs_phy *phy = &mvi->phy[i];
+       /* workaround for HW phy decoding error on 1.5g disk drive */
+       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE6);
+       tmp = mvs_read_port_vsr_data(mvi, i);
+       if (((phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+            PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET) ==
+               SAS_LINK_RATE_1_5_GBPS)
+               tmp &= ~PHY_MODE6_LATECLK;
+       else
+               tmp |= PHY_MODE6_LATECLK;
+       mvs_write_port_vsr_data(mvi, i, tmp);
+}
+
+void mvs_64xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+                       struct sas_phy_linkrates *rates)
+{
+       u32 lrmin = 0, lrmax = 0;
+       u32 tmp;
+
+       tmp = mvs_read_phy_ctl(mvi, phy_id);
+       lrmin = (rates->minimum_linkrate << 8);
+       lrmax = (rates->maximum_linkrate << 12);
+
+       if (lrmin) {
+               tmp &= ~(0xf << 8);
+               tmp |= lrmin;
+       }
+       if (lrmax) {
+               tmp &= ~(0xf << 12);
+               tmp |= lrmax;
+       }
+       mvs_write_phy_ctl(mvi, phy_id, tmp);
+       mvs_64xx_phy_reset(mvi, phy_id, 1);
+}
+
+static void mvs_64xx_clear_active_cmds(struct mvs_info *mvi)
+{
+       u32 tmp;
+       void __iomem *regs = mvi->regs;
+       tmp = mr32(MVS_PCS);
+       mw32(MVS_PCS, tmp & 0xFFFF);
+       mw32(MVS_PCS, tmp);
+       tmp = mr32(MVS_CTL);
+       mw32(MVS_CTL, tmp & 0xFFFF);
+       mw32(MVS_CTL, tmp);
+}
+
+u32 mvs_64xx_spi_read_data(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs_ex;
+       return ior32(SPI_DATA_REG_64XX);
+}
+
+void mvs_64xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+       void __iomem *regs = mvi->regs_ex;
+       iow32(SPI_DATA_REG_64XX, data);
+}
+
+int mvs_64xx_spi_buildcmd(struct mvs_info *mvi,
+                       u32      *dwCmd,
+                       u8       cmd,
+                       u8       read,
+                       u8       length,
+                       u32      addr
+                       )
+{
+       u32  dwTmp;
+
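+       /* pack the opcode, transfer length, read flag and optional
+        * 18-bit address into the 64xx SPI command word */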
+       dwTmp = ((u32)cmd << 24) | ((u32)length << 19);
+       if (read)
+               dwTmp |= 1U<<23;
+
+       if (addr != MV_MAX_U32) {
+               dwTmp |= 1U<<22;
+               dwTmp |= (addr & 0x0003FFFF);
+       }
+
+       *dwCmd = dwTmp;
+       return 0;
+}
+
+int mvs_64xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+       void __iomem *regs = mvi->regs_ex;
+       int     retry;
+
+       for (retry = 0; retry < 1; retry++) {
+               iow32(SPI_CTRL_REG_64XX, SPI_CTRL_VENDOR_ENABLE);
+               iow32(SPI_CMD_REG_64XX, cmd);
+               iow32(SPI_CTRL_REG_64XX,
+                       SPI_CTRL_VENDOR_ENABLE | SPI_CTRL_SPISTART);
+       }
+
+       return 0;
+}
+
+int mvs_64xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+       void __iomem *regs = mvi->regs_ex;
+       u32 i, dwTmp;
+
+       for (i = 0; i < timeout; i++) {
+               dwTmp = ior32(SPI_CTRL_REG_64XX);
+               if (!(dwTmp & SPI_CTRL_SPISTART))
+                       return 0;
+               msleep(10);
+       }
+
+       return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_64xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+       int i;
+       struct mvs_prd *buf_prd = prd;
+       buf_prd += from;
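+       /* fill every PRD entry past 'from' with the supplied fallback
+        * buffer (the hotplug DMA workaround) */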
+       for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+               buf_prd->addr = cpu_to_le64(buf_dma);
+               buf_prd->len = cpu_to_le32(buf_len);
+               ++buf_prd;
+       }
+}
+#endif
+
+const struct mvs_dispatch mvs_64xx_dispatch = {
+       "mv64xx",
+       mvs_64xx_init,
+       NULL,
+       mvs_64xx_ioremap,
+       mvs_64xx_iounmap,
+       mvs_64xx_isr,
+       mvs_64xx_isr_status,
+       mvs_64xx_interrupt_enable,
+       mvs_64xx_interrupt_disable,
+       mvs_read_phy_ctl,
+       mvs_write_phy_ctl,
+       mvs_read_port_cfg_data,
+       mvs_write_port_cfg_data,
+       mvs_write_port_cfg_addr,
+       mvs_read_port_vsr_data,
+       mvs_write_port_vsr_data,
+       mvs_write_port_vsr_addr,
+       mvs_read_port_irq_stat,
+       mvs_write_port_irq_stat,
+       mvs_read_port_irq_mask,
+       mvs_write_port_irq_mask,
+       mvs_get_sas_addr,
+       mvs_64xx_command_active,
+       mvs_64xx_issue_stop,
+       mvs_start_delivery,
+       mvs_rx_update,
+       mvs_int_full,
+       mvs_64xx_assign_reg_set,
+       mvs_64xx_free_reg_set,
+       mvs_get_prd_size,
+       mvs_get_prd_count,
+       mvs_64xx_make_prd,
+       mvs_64xx_detect_porttype,
+       mvs_64xx_oob_done,
+       mvs_64xx_fix_phy_info,
+       mvs_64xx_phy_work_around,
+       mvs_64xx_phy_set_link_rate,
+       mvs_hw_max_link_rate,
+       mvs_64xx_phy_disable,
+       mvs_64xx_phy_enable,
+       mvs_64xx_phy_reset,
+       mvs_64xx_stp_reset,
+       mvs_64xx_clear_active_cmds,
+       mvs_64xx_spi_read_data,
+       mvs_64xx_spi_write_data,
+       mvs_64xx_spi_buildcmd,
+       mvs_64xx_spi_issuecmd,
+       mvs_64xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       mvs_64xx_fix_dma,
+#endif
+};
diff --git a/drivers/scsi/mvsas/mv_64xx.h b/drivers/scsi/mvsas/mv_64xx.h
new file mode 100644 (file)
index 0000000..42e947d
--- /dev/null
@@ -0,0 +1,151 @@
+/*
+ * Marvell 88SE64xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS64XX_REG_H_
+#define _MVS64XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE          SAS_LINK_RATE_3_0_GBPS
+
+/* enhanced mode registers (BAR4) */
+enum hw_registers {
+       MVS_GBL_CTL             = 0x04,  /* global control */
+       MVS_GBL_INT_STAT        = 0x08,  /* global irq status */
+       MVS_GBL_PI              = 0x0C,  /* ports implemented bitmask */
+
+       MVS_PHY_CTL             = 0x40,  /* SOC PHY Control */
+       MVS_PORTS_IMP           = 0x9C,  /* SOC Port Implemented */
+
+       MVS_GBL_PORT_TYPE       = 0xa0,  /* port type */
+
+       MVS_CTL                 = 0x100, /* SAS/SATA port configuration */
+       MVS_PCS                 = 0x104, /* SAS/SATA port control/status */
+       MVS_CMD_LIST_LO         = 0x108, /* cmd list addr */
+       MVS_CMD_LIST_HI         = 0x10C,
+       MVS_RX_FIS_LO           = 0x110, /* RX FIS list addr */
+       MVS_RX_FIS_HI           = 0x114,
+
+       MVS_TX_CFG              = 0x120, /* TX configuration */
+       MVS_TX_LO               = 0x124, /* TX (delivery) ring addr */
+       MVS_TX_HI               = 0x128,
+
+       MVS_TX_PROD_IDX         = 0x12C, /* TX producer pointer */
+       MVS_TX_CONS_IDX         = 0x130, /* TX consumer pointer (RO) */
+       MVS_RX_CFG              = 0x134, /* RX configuration */
+       MVS_RX_LO               = 0x138, /* RX (completion) ring addr */
+       MVS_RX_HI               = 0x13C,
+       MVS_RX_CONS_IDX         = 0x140, /* RX consumer pointer (RO) */
+
+       MVS_INT_COAL            = 0x148, /* Int coalescing config */
+       MVS_INT_COAL_TMOUT      = 0x14C, /* Int coalescing timeout */
+       MVS_INT_STAT            = 0x150, /* Central int status */
+       MVS_INT_MASK            = 0x154, /* Central int enable */
+       MVS_INT_STAT_SRS_0      = 0x158, /* SATA register set status */
+       MVS_INT_MASK_SRS_0      = 0x15C,
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_INT_STAT         = 0x160, /* port0 interrupt status */
+       MVS_P0_INT_MASK         = 0x164, /* port0 interrupt mask */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_INT_STAT         = 0x200, /* Port4 interrupt status */
+       MVS_P4_INT_MASK         = 0x204, /* Port4 interrupt enable mask */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_SER_CTLSTAT      = 0x180, /* port0 serial control/status */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_SER_CTLSTAT      = 0x220, /* port4 serial control/status */
+
+       MVS_CMD_ADDR            = 0x1B8, /* Command register port (addr) */
+       MVS_CMD_DATA            = 0x1BC, /* Command register port (data) */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_CFG_ADDR         = 0x1C0, /* port0 phy register address */
+       MVS_P0_CFG_DATA         = 0x1C4, /* port0 phy register data */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_CFG_ADDR         = 0x230, /* Port4 config address */
+       MVS_P4_CFG_DATA         = 0x234, /* Port4 config data */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_VSR_ADDR         = 0x1E0, /* port0 VSR address */
+       MVS_P0_VSR_DATA         = 0x1E4, /* port0 VSR data */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_VSR_ADDR         = 0x250, /* port4 VSR addr */
+       MVS_P4_VSR_DATA         = 0x254, /* port4 VSR data */
+};
+
+enum pci_cfg_registers {
+       PCR_PHY_CTL             = 0x40,
+       PCR_PHY_CTL2            = 0x90,
+       PCR_DEV_CTRL            = 0xE8,
+       PCR_LINK_STAT           = 0xF2,
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+       VSR_PHY_STAT            = 0x00, /* Phy Status */
+       VSR_PHY_MODE1           = 0x01, /* phy tx */
+       VSR_PHY_MODE2           = 0x02, /* tx scc */
+       VSR_PHY_MODE3           = 0x03, /* pll */
+       VSR_PHY_MODE4           = 0x04, /* VCO */
+       VSR_PHY_MODE5           = 0x05, /* Rx */
+       VSR_PHY_MODE6           = 0x06, /* CDR */
+       VSR_PHY_MODE7           = 0x07, /* Impedance */
+       VSR_PHY_MODE8           = 0x08, /* Voltage */
+       VSR_PHY_MODE9           = 0x09, /* Test */
+       VSR_PHY_MODE10          = 0x0A, /* Power */
+       VSR_PHY_MODE11          = 0x0B, /* Phy Mode */
+       VSR_PHY_VS0             = 0x0C, /* Vendor Specific 0 */
+       VSR_PHY_VS1             = 0x0D, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+       PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0xF << 8),
+       PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0xF << 12),
+       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (16),
+       PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+                       (0xF << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
+
+#define MAX_SG_ENTRY           64
+
+struct mvs_prd {
+       __le64                  addr;           /* 64-bit buffer address */
+       __le32                  reserved;
+       __le32                  len;            /* 16-bit length */
+};
+
+#define SPI_CTRL_REG                           0xc0
+#define SPI_CTRL_VENDOR_ENABLE         (1U<<29)
+#define SPI_CTRL_SPIRDY                        (1U<<22)
+#define SPI_CTRL_SPISTART                      (1U<<20)
+
+#define SPI_CMD_REG            0xc4
+#define SPI_DATA_REG           0xc8
+
+#define SPI_CTRL_REG_64XX              0x10
+#define SPI_CMD_REG_64XX               0x14
+#define SPI_DATA_REG_64XX              0x18
+
+#endif
diff --git a/drivers/scsi/mvsas/mv_94xx.c b/drivers/scsi/mvsas/mv_94xx.c
new file mode 100644 (file)
index 0000000..0940fae
--- /dev/null
@@ -0,0 +1,672 @@
+/*
+ * Marvell 88SE94xx hardware specific
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+#include "mv_94xx.h"
+#include "mv_chips.h"
+
+static void mvs_94xx_detect_porttype(struct mvs_info *mvi, int i)
+{
+       u32 reg;
+       struct mvs_phy *phy = &mvi->phy[i];
+       u32 phy_status;
+
+       mvs_write_port_vsr_addr(mvi, i, VSR_PHY_MODE3);
+       reg = mvs_read_port_vsr_data(mvi, i);
+       phy_status = ((reg & 0x3f0000) >> 16) & 0xff;
+       phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       switch (phy_status) {
+       case 0x10:
+               phy->phy_type |= PORT_TYPE_SAS;
+               break;
+       case 0x1d:
+       default:
+               phy->phy_type |= PORT_TYPE_SATA;
+               break;
+       }
+}
+
+static void __devinit mvs_94xx_enable_xmt(struct mvs_info *mvi, int phy_id)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       tmp = mr32(MVS_PCS);
+       tmp |= 1 << (phy_id + PCS_EN_PORT_XMT_SHIFT2);
+       mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_phy_reset(struct mvs_info *mvi, u32 phy_id, int hard)
+{
+       u32 tmp;
+
+       tmp = mvs_read_port_irq_stat(mvi, phy_id);
+       tmp &= ~PHYEV_RDY_CH;
+       mvs_write_port_irq_stat(mvi, phy_id, tmp);
+       if (hard) {
+               tmp = mvs_read_phy_ctl(mvi, phy_id);
+               tmp |= PHY_RST_HARD;
+               mvs_write_phy_ctl(mvi, phy_id, tmp);
+               do {
+                       tmp = mvs_read_phy_ctl(mvi, phy_id);
+               } while (tmp & PHY_RST_HARD);
+       } else {
+               mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_STAT);
+               tmp = mvs_read_port_vsr_data(mvi, phy_id);
+               tmp |= PHY_RST;
+               mvs_write_port_vsr_data(mvi, phy_id, tmp);
+       }
+}
+
+static void mvs_94xx_phy_disable(struct mvs_info *mvi, u32 phy_id)
+{
+       u32 tmp;
+       mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+       tmp = mvs_read_port_vsr_data(mvi, phy_id);
+       mvs_write_port_vsr_data(mvi, phy_id, tmp | 0x00800000);
+}
+
+static void mvs_94xx_phy_enable(struct mvs_info *mvi, u32 phy_id)
+{
+       mvs_write_port_vsr_addr(mvi, phy_id, 0x1B4);
+       mvs_write_port_vsr_data(mvi, phy_id, 0x8300ffc1);
+       mvs_write_port_vsr_addr(mvi, phy_id, 0x104);
+       mvs_write_port_vsr_data(mvi, phy_id, 0x00018080);
+       mvs_write_port_vsr_addr(mvi, phy_id, VSR_PHY_MODE2);
+       mvs_write_port_vsr_data(mvi, phy_id, 0x00207fff);
+}
+
+static int __devinit mvs_94xx_init(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       int i;
+       u32 tmp, cctl;
+
+       mvs_show_pcie_usage(mvi);
+       if (mvi->flags & MVF_FLAG_SOC) {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp |= PCTL_PHY_DSBL;
+               mw32(MVS_PHY_CTL, tmp);
+       }
+
+       /* Init Chip */
+       /* make sure RST is set; HBA_RST /should/ have done that for us */
+       cctl = mr32(MVS_CTL) & 0xFFFF;
+       if (cctl & CCTL_RST)
+               cctl &= ~CCTL_RST;
+       else
+               mw32_f(MVS_CTL, cctl | CCTL_RST);
+
+       if (mvi->flags & MVF_FLAG_SOC) {
+               tmp = mr32(MVS_PHY_CTL);
+               tmp &= ~PCTL_PWR_OFF;
+               tmp |= PCTL_COM_ON;
+               tmp &= ~PCTL_PHY_DSBL;
+               tmp |= PCTL_LINK_RST;
+               mw32(MVS_PHY_CTL, tmp);
+               msleep(100);
+               tmp &= ~PCTL_LINK_RST;
+               mw32(MVS_PHY_CTL, tmp);
+               msleep(100);
+       }
+
+       /* reset control */
+       mw32(MVS_PCS, 0);               /* MVS_PCS */
+       mw32(MVS_STP_REG_SET_0, 0);
+       mw32(MVS_STP_REG_SET_1, 0);
+
+       /* init phys */
+       mvs_phy_hacks(mvi);
+
+       /* disable multiplexing; mark all phys as implemented */
+       mw32(MVS_PORTS_IMP, 0xFF);
+
+       mw32(MVS_PA_VSR_ADDR, 0x00000104);
+       mw32(MVS_PA_VSR_PORT, 0x00018080);
+       mw32(MVS_PA_VSR_ADDR, VSR_PHY_MODE8);
+       mw32(MVS_PA_VSR_PORT, 0x0084ffff);
+
+       /* set the LED to blink during I/O */
+       mw32(MVS_PA_VSR_ADDR, 0x00000030);
+       tmp = mr32(MVS_PA_VSR_PORT);
+       tmp &= 0xFFFF00FF;
+       tmp |= 0x00003300;
+       mw32(MVS_PA_VSR_PORT, tmp);
+
+       mw32(MVS_CMD_LIST_LO, mvi->slot_dma);
+       mw32(MVS_CMD_LIST_HI, (mvi->slot_dma >> 16) >> 16);
+
+       mw32(MVS_RX_FIS_LO, mvi->rx_fis_dma);
+       mw32(MVS_RX_FIS_HI, (mvi->rx_fis_dma >> 16) >> 16);
+
+       mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ);
+       mw32(MVS_TX_LO, mvi->tx_dma);
+       mw32(MVS_TX_HI, (mvi->tx_dma >> 16) >> 16);
+
+       mw32(MVS_RX_CFG, MVS_RX_RING_SZ);
+       mw32(MVS_RX_LO, mvi->rx_dma);
+       mw32(MVS_RX_HI, (mvi->rx_dma >> 16) >> 16);
+
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               mvs_94xx_phy_disable(mvi, i);
+               /* set phy local SAS address */
+               mvs_set_sas_addr(mvi, i, CONFIG_ID_FRAME3, CONFIG_ID_FRAME4,
+                                               (mvi->phy[i].dev_sas_addr));
+
+               mvs_94xx_enable_xmt(mvi, i);
+               mvs_94xx_phy_enable(mvi, i);
+
+               mvs_94xx_phy_reset(mvi, i, 1);
+               msleep(500);
+               mvs_94xx_detect_porttype(mvi, i);
+       }
+
+       if (mvi->flags & MVF_FLAG_SOC) {
+               /* set select registers */
+               writel(0x0E008000, regs + 0x000);
+               writel(0x59000008, regs + 0x004);
+               writel(0x20, regs + 0x008);
+               writel(0x20, regs + 0x00c);
+               writel(0x20, regs + 0x010);
+               writel(0x20, regs + 0x014);
+               writel(0x20, regs + 0x018);
+               writel(0x20, regs + 0x01c);
+       }
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               /* clear phy int status */
+               tmp = mvs_read_port_irq_stat(mvi, i);
+               tmp &= ~PHYEV_SIG_FIS;
+               mvs_write_port_irq_stat(mvi, i, tmp);
+
+               /* set phy int mask */
+               tmp = PHYEV_RDY_CH | PHYEV_BROAD_CH |
+                       PHYEV_ID_DONE  | PHYEV_DCDR_ERR | PHYEV_CRC_ERR ;
+               mvs_write_port_irq_mask(mvi, i, tmp);
+
+               msleep(100);
+               mvs_update_phyinfo(mvi, i, 1);
+       }
+
+       /* FIXME: update wide port bitmaps */
+
+       /* little endian for open address and command table, etc. */
+       /*
+        * it seems (from the spec) that turning on big-endian won't
+        * do us any good on big-endian machines; needs further confirmation
+        */
+       cctl = mr32(MVS_CTL);
+       cctl |= CCTL_ENDIAN_CMD;
+       cctl |= CCTL_ENDIAN_DATA;
+       cctl &= ~CCTL_ENDIAN_OPEN;
+       cctl |= CCTL_ENDIAN_RSP;
+       mw32_f(MVS_CTL, cctl);
+
+       /* reset CMD queue */
+       tmp = mr32(MVS_PCS);
+       tmp |= PCS_CMD_RST;
+       mw32(MVS_PCS, tmp);
+       /* interrupt coalescing may cause missed HW interrupts in some cases;
+        * also, the max coalesce count is 0x1ff while our max slot count is
+        * 0x200, so disable coalescing (count = 0).
+        */
+       tmp = 0;
+       mw32(MVS_INT_COAL, tmp);
+
+       tmp = 0x100;
+       mw32(MVS_INT_COAL_TMOUT, tmp);
+
+       /* ladies and gentlemen, start your engines */
+       mw32(MVS_TX_CFG, 0);
+       mw32(MVS_TX_CFG, MVS_CHIP_SLOT_SZ | TX_EN);
+       mw32(MVS_RX_CFG, MVS_RX_RING_SZ | RX_EN);
+       /* enable CMD/CMPL_Q/RESP mode */
+       mw32(MVS_PCS, PCS_SATA_RETRY_2 | PCS_FIS_RX_EN |
+               PCS_CMD_EN | PCS_CMD_STOP_ERR);
+
+       /* enable completion queue interrupt */
+       tmp = (CINT_PORT_MASK | CINT_DONE | CINT_MEM | CINT_SRS | CINT_CI_STOP |
+               CINT_DMA_PCIE);
+       tmp |= CINT_PHY_MASK;
+       mw32(MVS_INT_MASK, tmp);
+
+       /* Enable SRS interrupt */
+       mw32(MVS_INT_MASK_SRS_0, 0xFFFF);
+
+       return 0;
+}
+
+static int mvs_94xx_ioremap(struct mvs_info *mvi)
+{
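+       /* the 94xx maps everything through a single BAR: the extended
+        * registers live at +0x10200 and each HBA's main register block
+        * at +0x20000 (the second HBA a further +0x4000 up) */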
+       if (!mvs_ioremap(mvi, 2, -1)) {
+               mvi->regs_ex = mvi->regs + 0x10200;
+               mvi->regs += 0x20000;
+               if (mvi->id == 1)
+                       mvi->regs += 0x4000;
+               return 0;
+       }
+       return -1;
+}
+
+static void mvs_94xx_iounmap(struct mvs_info *mvi)
+{
+       if (mvi->regs) {
+               mvi->regs -= 0x20000;
+               if (mvi->id == 1)
+                       mvi->regs -= 0x4000;
+               mvs_iounmap(mvi->regs);
+       }
+}
+
+static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs_ex;
+       u32 tmp;
+
+       tmp = mr32(MVS_GBL_CTL);
+       tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+       mw32(MVS_GBL_INT_STAT, tmp);
+       writel(tmp, regs + 0x0C);
+       writel(tmp, regs + 0x10);
+       writel(tmp, regs + 0x14);
+       writel(tmp, regs + 0x18);
+       mw32(MVS_GBL_CTL, tmp);
+}
+
+static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs_ex;
+       u32 tmp;
+
+       tmp = mr32(MVS_GBL_CTL);
+
+       tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+       mw32(MVS_GBL_INT_STAT, tmp);
+       writel(tmp, regs + 0x0C);
+       writel(tmp, regs + 0x10);
+       writel(tmp, regs + 0x14);
+       writel(tmp, regs + 0x18);
+       mw32(MVS_GBL_CTL, tmp);
+}
+
+static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
+{
+       void __iomem *regs = mvi->regs_ex;
+       u32 stat = 0;
+       if (!(mvi->flags & MVF_FLAG_SOC)) {
+               stat = mr32(MVS_GBL_INT_STAT);
+
+               if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+                       return 0;
+       }
+       return stat;
+}
+
+static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
+{
+       void __iomem *regs = mvi->regs;
+
+       if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
+                       ((stat & IRQ_SAS_B) && mvi->id == 1)) {
+               mw32_f(MVS_INT_STAT, CINT_DONE);
+       #ifndef MVS_USE_TASKLET
+               spin_lock(&mvi->lock);
+       #endif
+               mvs_int_full(mvi);
+       #ifndef MVS_USE_TASKLET
+               spin_unlock(&mvi->lock);
+       #endif
+       }
+       return IRQ_HANDLED;
+}
+
+static void mvs_94xx_command_active(struct mvs_info *mvi, u32 slot_idx)
+{
+       u32 tmp;
+       mvs_cw32(mvi, 0x300 + (slot_idx >> 3), 1 << (slot_idx % 32));
+       do {
+               tmp = mvs_cr32(mvi, 0x300 + (slot_idx >> 3));
+       } while (tmp & 1 << (slot_idx % 32));
+}
+
+static void mvs_94xx_issue_stop(struct mvs_info *mvi, enum mvs_port_type type,
+                               u32 tfs)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+
+       if (type == PORT_TYPE_SATA) {
+               tmp = mr32(MVS_INT_STAT_SRS_0) | (1U << tfs);
+               mw32(MVS_INT_STAT_SRS_0, tmp);
+       }
+       mw32(MVS_INT_STAT, CINT_CI_STOP);
+       tmp = mr32(MVS_PCS) | 0xFF00;
+       mw32(MVS_PCS, tmp);
+}
+
+static void mvs_94xx_free_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp;
+       u8 reg_set = *tfs;
+
+       if (*tfs == MVS_ID_NOT_MAPPED)
+               return;
+
+       mvi->sata_reg_set &= ~bit(reg_set);
+       if (reg_set < 32) {
+               w_reg_set_enable(reg_set, (u32)mvi->sata_reg_set);
+               tmp = mr32(MVS_INT_STAT_SRS_0) & (u32)mvi->sata_reg_set;
+               if (tmp)
+                       mw32(MVS_INT_STAT_SRS_0, tmp);
+       } else {
+               w_reg_set_enable(reg_set, mvi->sata_reg_set);
+               tmp = mr32(MVS_INT_STAT_SRS_1) & mvi->sata_reg_set;
+               if (tmp)
+                       mw32(MVS_INT_STAT_SRS_1, tmp);
+       }
+
+       *tfs = MVS_ID_NOT_MAPPED;
+
+       return;
+}
+
+static u8 mvs_94xx_assign_reg_set(struct mvs_info *mvi, u8 *tfs)
+{
+       int i;
+       void __iomem *regs = mvi->regs;
+
+       if (*tfs != MVS_ID_NOT_MAPPED)
+               return 0;
+
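+       /* grab the lowest free SATA register set; the enable bits are
+        * split across two 32-bit words, hence the two branches below */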
+       i = mv_ffc64(mvi->sata_reg_set);
+       if (i > 32) {
+               mvi->sata_reg_set |= bit(i);
+               w_reg_set_enable(i, (u32)(mvi->sata_reg_set >> 32));
+               *tfs = i;
+               return 0;
+       } else if (i >= 0) {
+               mvi->sata_reg_set |= bit(i);
+               w_reg_set_enable(i, (u32)mvi->sata_reg_set);
+               *tfs = i;
+               return 0;
+       }
+       return MVS_ID_NOT_MAPPED;
+}
+
+static void mvs_94xx_make_prd(struct scatterlist *scatter, int nr, void *prd)
+{
+       int i;
+       struct scatterlist *sg;
+       struct mvs_prd *buf_prd = prd;
+       for_each_sg(scatter, sg, nr, i) {
+               buf_prd->addr = cpu_to_le64(sg_dma_address(sg));
+               buf_prd->im_len.len = cpu_to_le32(sg_dma_len(sg));
+               buf_prd++;
+       }
+}
+
+static int mvs_94xx_oob_done(struct mvs_info *mvi, int i)
+{
+       u32 phy_st;
+       phy_st = mvs_read_phy_ctl(mvi, i);
+       if (phy_st & PHY_READY_MASK)    /* phy ready */
+               return 1;
+       return 0;
+}
+
+static void mvs_94xx_get_dev_identify_frame(struct mvs_info *mvi, int port_id,
+                                       struct sas_identify_frame *id)
+{
+       int i;
+       u32 id_frame[7];
+
+       for (i = 0; i < 7; i++) {
+               mvs_write_port_cfg_addr(mvi, port_id,
+                                       CONFIG_ID_FRAME0 + i * 4);
+               id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+       }
+       memcpy(id, id_frame, 28);
+}
+
+static void mvs_94xx_get_att_identify_frame(struct mvs_info *mvi, int port_id,
+                                       struct sas_identify_frame *id)
+{
+       int i;
+       u32 id_frame[7];
+
+       /* mvs_hexdump(28, (u8 *)id_frame, 0); */
+       for (i = 0; i < 7; i++) {
+               mvs_write_port_cfg_addr(mvi, port_id,
+                                       CONFIG_ATT_ID_FRAME0 + i * 4);
+               id_frame[i] = mvs_read_port_cfg_data(mvi, port_id);
+               mv_dprintk("94xx phy %d atta frame %d %x.\n",
+                       port_id + mvi->id * mvi->chip->n_phy, i, id_frame[i]);
+       }
+       /* mvs_hexdump(28, (u8 *)id_frame, 0); */
+       memcpy(id, id_frame, 28);
+}
+
+static u32 mvs_94xx_make_dev_info(struct sas_identify_frame *id)
+{
+       u32 att_dev_info = 0;
+
+       att_dev_info |= id->dev_type;
+       if (id->stp_iport)
+               att_dev_info |= PORT_DEV_STP_INIT;
+       if (id->smp_iport)
+               att_dev_info |= PORT_DEV_SMP_INIT;
+       if (id->ssp_iport)
+               att_dev_info |= PORT_DEV_SSP_INIT;
+       if (id->stp_tport)
+               att_dev_info |= PORT_DEV_STP_TRGT;
+       if (id->smp_tport)
+               att_dev_info |= PORT_DEV_SMP_TRGT;
+       if (id->ssp_tport)
+               att_dev_info |= PORT_DEV_SSP_TRGT;
+
+       att_dev_info |= (u32)id->phy_id<<24;
+       return att_dev_info;
+}
+
+static u32 mvs_94xx_make_att_info(struct sas_identify_frame *id)
+{
+       return mvs_94xx_make_dev_info(id);
+}
+
+static void mvs_94xx_fix_phy_info(struct mvs_info *mvi, int i,
+                               struct sas_identify_frame *id)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       mv_dprintk("get all reg link rate is 0x%x\n", phy->phy_status);
+       sas_phy->linkrate =
+               (phy->phy_status & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+                       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
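+       /* the hardware reports a zero-based rate while enum sas_linkrate
+        * starts at SAS_LINK_RATE_1_5_GBPS (8), hence the bias */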
+       sas_phy->linkrate += 0x8;
+       mv_dprintk("get link rate is %d\n", sas_phy->linkrate);
+       phy->minimum_linkrate = SAS_LINK_RATE_1_5_GBPS;
+       phy->maximum_linkrate = SAS_LINK_RATE_6_0_GBPS;
+       mvs_94xx_get_dev_identify_frame(mvi, i, id);
+       phy->dev_info = mvs_94xx_make_dev_info(id);
+
+       if (phy->phy_type & PORT_TYPE_SAS) {
+               mvs_94xx_get_att_identify_frame(mvi, i, id);
+               phy->att_dev_info = mvs_94xx_make_att_info(id);
+               phy->att_dev_sas_addr = *(u64 *)id->sas_addr;
+       } else {
+               phy->att_dev_info = PORT_DEV_STP_TRGT | 1;
+       }
+}
+
+void mvs_94xx_phy_set_link_rate(struct mvs_info *mvi, u32 phy_id,
+                       struct sas_phy_linkrates *rates)
+{
+       /* TODO */
+}
+
+static void mvs_94xx_clear_active_cmds(struct mvs_info *mvi)
+{
+       u32 tmp;
+       void __iomem *regs = mvi->regs;
+       tmp = mr32(MVS_STP_REG_SET_0);
+       mw32(MVS_STP_REG_SET_0, 0);
+       mw32(MVS_STP_REG_SET_0, tmp);
+       tmp = mr32(MVS_STP_REG_SET_1);
+       mw32(MVS_STP_REG_SET_1, 0);
+       mw32(MVS_STP_REG_SET_1, tmp);
+}
+
+u32 mvs_94xx_spi_read_data(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs_ex - 0x10200;
+       return mr32(SPI_RD_DATA_REG_94XX);
+}
+
+void mvs_94xx_spi_write_data(struct mvs_info *mvi, u32 data)
+{
+       void __iomem *regs = mvi->regs_ex - 0x10200;
+       mw32(SPI_RD_DATA_REG_94XX, data);
+}
+
+int mvs_94xx_spi_buildcmd(struct mvs_info *mvi,
+                               u32      *dwCmd,
+                               u8       cmd,
+                               u8       read,
+                               u8       length,
+                               u32      addr
+                               )
+{
+       void __iomem *regs = mvi->regs_ex - 0x10200;
+       u32  dwTmp;
+
+       dwTmp = ((u32)cmd << 8) | ((u32)length << 4);
+       if (read)
+               dwTmp |= SPI_CTRL_READ_94XX;
+
+       if (addr != MV_MAX_U32) {
+               mw32(SPI_ADDR_REG_94XX, (addr & 0x0003FFFFL));
+               dwTmp |= SPI_ADDR_VLD_94XX;
+       }
+
+       *dwCmd = dwTmp;
+       return 0;
+}
+
+int mvs_94xx_spi_issuecmd(struct mvs_info *mvi, u32 cmd)
+{
+       void __iomem *regs = mvi->regs_ex - 0x10200;
+       mw32(SPI_CTRL_REG_94XX, cmd | SPI_CTRL_SpiStart_94XX);
+
+       return 0;
+}
+
+int mvs_94xx_spi_waitdataready(struct mvs_info *mvi, u32 timeout)
+{
+       void __iomem *regs = mvi->regs_ex - 0x10200;
+       u32   i, dwTmp;
+
+       for (i = 0; i < timeout; i++) {
+               dwTmp = mr32(SPI_CTRL_REG_94XX);
+               if (!(dwTmp & SPI_CTRL_SpiStart_94XX))
+                       return 0;
+               msleep(10);
+       }
+
+       return -1;
+}
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+void mvs_94xx_fix_dma(dma_addr_t buf_dma, int buf_len, int from, void *prd)
+{
+       int i;
+       struct mvs_prd *buf_prd = prd;
+       buf_prd += from;
+       for (i = 0; i < MAX_SG_ENTRY - from; i++) {
+               buf_prd->addr = cpu_to_le64(buf_dma);
+               buf_prd->im_len.len = cpu_to_le32(buf_len);
+               ++buf_prd;
+       }
+}
+#endif
+
+const struct mvs_dispatch mvs_94xx_dispatch = {
+       "mv94xx",
+       mvs_94xx_init,
+       NULL,
+       mvs_94xx_ioremap,
+       mvs_94xx_iounmap,
+       mvs_94xx_isr,
+       mvs_94xx_isr_status,
+       mvs_94xx_interrupt_enable,
+       mvs_94xx_interrupt_disable,
+       mvs_read_phy_ctl,
+       mvs_write_phy_ctl,
+       mvs_read_port_cfg_data,
+       mvs_write_port_cfg_data,
+       mvs_write_port_cfg_addr,
+       mvs_read_port_vsr_data,
+       mvs_write_port_vsr_data,
+       mvs_write_port_vsr_addr,
+       mvs_read_port_irq_stat,
+       mvs_write_port_irq_stat,
+       mvs_read_port_irq_mask,
+       mvs_write_port_irq_mask,
+       mvs_get_sas_addr,
+       mvs_94xx_command_active,
+       mvs_94xx_issue_stop,
+       mvs_start_delivery,
+       mvs_rx_update,
+       mvs_int_full,
+       mvs_94xx_assign_reg_set,
+       mvs_94xx_free_reg_set,
+       mvs_get_prd_size,
+       mvs_get_prd_count,
+       mvs_94xx_make_prd,
+       mvs_94xx_detect_porttype,
+       mvs_94xx_oob_done,
+       mvs_94xx_fix_phy_info,
+       NULL,
+       mvs_94xx_phy_set_link_rate,
+       mvs_hw_max_link_rate,
+       mvs_94xx_phy_disable,
+       mvs_94xx_phy_enable,
+       mvs_94xx_phy_reset,
+       NULL,
+       mvs_94xx_clear_active_cmds,
+       mvs_94xx_spi_read_data,
+       mvs_94xx_spi_write_data,
+       mvs_94xx_spi_buildcmd,
+       mvs_94xx_spi_issuecmd,
+       mvs_94xx_spi_waitdataready,
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       mvs_94xx_fix_dma,
+#endif
+};
diff --git a/drivers/scsi/mvsas/mv_94xx.h b/drivers/scsi/mvsas/mv_94xx.h
new file mode 100644 (file)
index 0000000..23ed9b1
--- /dev/null
@@ -0,0 +1,222 @@
+/*
+ * Marvell 88SE94xx hardware specific header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MVS94XX_REG_H_
+#define _MVS94XX_REG_H_
+
+#include <linux/types.h>
+
+#define MAX_LINK_RATE          SAS_LINK_RATE_6_0_GBPS
+
+enum hw_registers {
+       MVS_GBL_CTL             = 0x04,  /* global control */
+       MVS_GBL_INT_STAT        = 0x00,  /* global irq status */
+       MVS_GBL_PI              = 0x0C,  /* ports implemented bitmask */
+
+       MVS_PHY_CTL             = 0x40,  /* SOC PHY Control */
+       MVS_PORTS_IMP           = 0x9C,  /* SOC Port Implemented */
+
+       MVS_GBL_PORT_TYPE       = 0xa0,  /* port type */
+
+       MVS_CTL                 = 0x100, /* SAS/SATA port configuration */
+       MVS_PCS                 = 0x104, /* SAS/SATA port control/status */
+       MVS_CMD_LIST_LO         = 0x108, /* cmd list addr */
+       MVS_CMD_LIST_HI         = 0x10C,
+       MVS_RX_FIS_LO           = 0x110, /* RX FIS list addr */
+       MVS_RX_FIS_HI           = 0x114,
+       MVS_STP_REG_SET_0       = 0x118, /* STP/SATA Register Set Enable */
+       MVS_STP_REG_SET_1       = 0x11C,
+       MVS_TX_CFG              = 0x120, /* TX configuration */
+       MVS_TX_LO               = 0x124, /* TX (delivery) ring addr */
+       MVS_TX_HI               = 0x128,
+
+       MVS_TX_PROD_IDX         = 0x12C, /* TX producer pointer */
+       MVS_TX_CONS_IDX         = 0x130, /* TX consumer pointer (RO) */
+       MVS_RX_CFG              = 0x134, /* RX configuration */
+       MVS_RX_LO               = 0x138, /* RX (completion) ring addr */
+       MVS_RX_HI               = 0x13C,
+       MVS_RX_CONS_IDX         = 0x140, /* RX consumer pointer (RO) */
+
+       MVS_INT_COAL            = 0x148, /* Int coalescing config */
+       MVS_INT_COAL_TMOUT      = 0x14C, /* Int coalescing timeout */
+       MVS_INT_STAT            = 0x150, /* Central int status */
+       MVS_INT_MASK            = 0x154, /* Central int enable */
+       MVS_INT_STAT_SRS_0      = 0x158, /* SATA register set status */
+       MVS_INT_MASK_SRS_0      = 0x15C,
+       MVS_INT_STAT_SRS_1      = 0x160,
+       MVS_INT_MASK_SRS_1      = 0x164,
+       MVS_NON_NCQ_ERR_0       = 0x168, /* SRS Non-specific NCQ Error */
+       MVS_NON_NCQ_ERR_1       = 0x16C,
+       MVS_CMD_ADDR            = 0x170, /* Command register port (addr) */
+       MVS_CMD_DATA            = 0x174, /* Command register port (data) */
+       MVS_MEM_PARITY_ERR      = 0x178, /* Memory parity error */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_INT_STAT         = 0x180, /* port0 interrupt status */
+       MVS_P0_INT_MASK         = 0x184, /* port0 interrupt mask */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_INT_STAT         = 0x1A0, /* Port4 interrupt status */
+       MVS_P4_INT_MASK         = 0x1A4, /* Port4 interrupt enable mask */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_SER_CTLSTAT      = 0x1D0, /* port0 serial control/status */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_SER_CTLSTAT      = 0x1E0, /* port4 serial control/status */
+
+                                        /* ports 1-3 follow after this */
+       MVS_P0_CFG_ADDR         = 0x200, /* port0 phy register address */
+       MVS_P0_CFG_DATA         = 0x204, /* port0 phy register data */
+                                        /* ports 5-7 follow after this */
+       MVS_P4_CFG_ADDR         = 0x220, /* Port4 config address */
+       MVS_P4_CFG_DATA         = 0x224, /* Port4 config data */
+
+                                        /* phys 1-3 follow after this */
+       MVS_P0_VSR_ADDR         = 0x250, /* phy0 VSR address */
+       MVS_P0_VSR_DATA         = 0x254, /* phy0 VSR data */
+                                        /* phys 1-3 follow after this */
+                                        /* multiplexing */
+       MVS_P4_VSR_ADDR         = 0x250, /* phy4 VSR address */
+       MVS_P4_VSR_DATA         = 0x254, /* phy4 VSR data */
+       MVS_PA_VSR_ADDR         = 0x290, /* All port VSR addr */
+       MVS_PA_VSR_PORT         = 0x294, /* All port VSR data */
+};
+
+enum pci_cfg_registers {
+       PCR_PHY_CTL             = 0x40,
+       PCR_PHY_CTL2            = 0x90,
+       PCR_DEV_CTRL            = 0x78,
+       PCR_LINK_STAT           = 0x82,
+};
+
+/*  SAS/SATA Vendor Specific Port Registers */
+enum sas_sata_vsp_regs {
+       VSR_PHY_STAT            = 0x00 * 4, /* Phy Status */
+       VSR_PHY_MODE1           = 0x01 * 4, /* phy tx */
+       VSR_PHY_MODE2           = 0x02 * 4, /* tx scc */
+       VSR_PHY_MODE3           = 0x03 * 4, /* pll */
+       VSR_PHY_MODE4           = 0x04 * 4, /* VCO */
+       VSR_PHY_MODE5           = 0x05 * 4, /* Rx */
+       VSR_PHY_MODE6           = 0x06 * 4, /* CDR */
+       VSR_PHY_MODE7           = 0x07 * 4, /* Impedance */
+       VSR_PHY_MODE8           = 0x08 * 4, /* Voltage */
+       VSR_PHY_MODE9           = 0x09 * 4, /* Test */
+       VSR_PHY_MODE10          = 0x0A * 4, /* Power */
+       VSR_PHY_MODE11          = 0x0B * 4, /* Phy Mode */
+       VSR_PHY_VS0             = 0x0C * 4, /* Vendor Specific 0 */
+       VSR_PHY_VS1             = 0x0D * 4, /* Vendor Specific 1 */
+};
+
+enum chip_register_bits {
+       PHY_MIN_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+       PHY_MAX_SPP_PHYS_LINK_RATE_MASK = (0x7 << 8),
+       PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET = (12),
+       PHY_NEG_SPP_PHYS_LINK_RATE_MASK =
+                       (0x3 << PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET),
+};
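+/*
+ * Usage sketch (editorial; `val` is assumed to be a phy control/status
+ * word read via mvs_read_phy_ctl()): the negotiated rate is the 2-bit
+ * field
+ *
+ *	rate = (val & PHY_NEG_SPP_PHYS_LINK_RATE_MASK) >>
+ *			PHY_NEG_SPP_PHYS_LINK_RATE_MASK_OFFSET;
+ */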
+
+enum pci_interrupt_cause {
+       /*  MAIN_IRQ_CAUSE (R10200) Bits*/
+       IRQ_COM_IN_I2O_IOP0            = (1 << 0),
+       IRQ_COM_IN_I2O_IOP1            = (1 << 1),
+       IRQ_COM_IN_I2O_IOP2            = (1 << 2),
+       IRQ_COM_IN_I2O_IOP3            = (1 << 3),
+       IRQ_COM_OUT_I2O_HOS0           = (1 << 4),
+       IRQ_COM_OUT_I2O_HOS1           = (1 << 5),
+       IRQ_COM_OUT_I2O_HOS2           = (1 << 6),
+       IRQ_COM_OUT_I2O_HOS3           = (1 << 7),
+       IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8),
+       IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9),
+       IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10),
+       IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11),
+       IRQ_PCIF_DRBL0                 = (1 << 12),
+       IRQ_PCIF_DRBL1                 = (1 << 13),
+       IRQ_PCIF_DRBL2                 = (1 << 14),
+       IRQ_PCIF_DRBL3                 = (1 << 15),
+       IRQ_XOR_A                      = (1 << 16),
+       IRQ_XOR_B                      = (1 << 17),
+       IRQ_SAS_A                      = (1 << 18),
+       IRQ_SAS_B                      = (1 << 19),
+       IRQ_CPU_CNTRL                  = (1 << 20),
+       IRQ_GPIO                       = (1 << 21),
+       IRQ_UART                       = (1 << 22),
+       IRQ_SPI                        = (1 << 23),
+       IRQ_I2C                        = (1 << 24),
+       IRQ_SGPIO                      = (1 << 25),
+       IRQ_COM_ERR                    = (1 << 29),
+       IRQ_I2O_ERR                    = (1 << 30),
+       IRQ_PCIE_ERR                   = (1 << 31),
+};
+
+#define MAX_SG_ENTRY           255
+
+struct mvs_prd_imt {
+       __le32                  len:22;
+       u8                      _r_a:2;
+       u8                      misc_ctl:4;
+       u8                      inter_sel:4;
+};
+
+struct mvs_prd {
+       /* 64-bit buffer address */
+       __le64                  addr;
+       /* 22-bit length */
+       struct mvs_prd_imt      im_len;
+} __attribute__ ((packed));
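+/*
+ * Editorial sketch of filling a single PRD entry for an already-mapped
+ * buffer (buf_dma/buf_len are assumed to come from a prior DMA mapping);
+ * mvs_94xx_fix_dma() in mv_94xx.c does exactly this for padding entries:
+ *
+ *	struct mvs_prd *p = prd_table;
+ *	p->addr = cpu_to_le64(buf_dma);
+ *	p->im_len.len = cpu_to_le32(buf_len);
+ */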
+
+#define SPI_CTRL_REG_94XX              0xc800
+#define SPI_ADDR_REG_94XX              0xc804
+#define SPI_WR_DATA_REG_94XX           0xc808
+#define SPI_RD_DATA_REG_94XX           0xc80c
+#define SPI_CTRL_READ_94XX             (1U << 2)
+#define SPI_ADDR_VLD_94XX              (1U << 1)
+#define SPI_CTRL_SpiStart_94XX         (1U << 0)
+
+#define mv_ffc(x)   ffz(x)
+
+static inline int
+mv_ffc64(u64 v)
+{
+       /* mv_ffc()/ffz() is undefined when every bit is set, so test
+        * each 32-bit half for a cleared bit before calling it */
+       if ((u32)v != 0xffffffff)
+               return mv_ffc((u32)v);
+       if ((u32)(v >> 32) != 0xffffffff)
+               return 32 + mv_ffc((u32)(v >> 32));
+       return -1;
+}
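+/*
+ * Editorial note: mv_ffc64() yields the index of the first *cleared* bit
+ * in a 64-bit bitmap, or -1 when all 64 bits are set; presumably
+ * mvs_94xx_assign_reg_set() uses it to pick a free SATA register set:
+ *
+ *	int srs = mv_ffc64(in_use_bitmap);
+ *	if (srs == -1)
+ *		return -1;	/* no free register set */
+ */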
+
+#define r_reg_set_enable(i) \
+       (((i) > 31) ? mr32(MVS_STP_REG_SET_1) : \
+       mr32(MVS_STP_REG_SET_0))
+
+#define w_reg_set_enable(i, tmp) \
+       (((i) > 31) ? mw32(MVS_STP_REG_SET_1, tmp) : \
+       mw32(MVS_STP_REG_SET_0, tmp))
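+/*
+ * Editorial note: register sets 0-31 live in MVS_STP_REG_SET_0 and sets
+ * 32-63 in MVS_STP_REG_SET_1; both macros assume a local `regs` pointer,
+ * as mr32()/mw32() require. Marking set `i` busy might look like:
+ *
+ *	u32 tmp = r_reg_set_enable(i);
+ *	w_reg_set_enable(i, tmp | (1U << (i & 31)));
+ */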
+
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+#endif
+
diff --git a/drivers/scsi/mvsas/mv_chips.h b/drivers/scsi/mvsas/mv_chips.h
new file mode 100644 (file)
index 0000000..a67e1c4
--- /dev/null
@@ -0,0 +1,280 @@
+/*
+ * Marvell 88SE64xx/88SE94xx register IO interface
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#ifndef _MV_CHIPS_H_
+#define _MV_CHIPS_H_
+
+#define mr32(reg)      readl(regs + reg)
+#define mw32(reg, val) writel((val), regs + reg)
+#define mw32_f(reg, val)       do {                    \
+                               mw32(reg, val); \
+                               mr32(reg);      \
+                       } while (0)
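+/* The _f ("flush") variant reads the register back after writing, forcing
+ * posted PCI writes out to the device before the caller continues. */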
+
+#define iow32(reg, val)        outl(val, (unsigned long)(regs + reg))
+#define ior32(reg)             inl((unsigned long)(regs + reg))
+#define iow16(reg, val)        outw((val), (unsigned long)(regs + reg))
+#define ior16(reg)             inw((unsigned long)(regs + reg))
+#define iow8(reg, val)         outb((val), (unsigned long)(regs + reg))
+#define ior8(reg)              inb((unsigned long)(regs + reg))
+
+static inline u32 mvs_cr32(struct mvs_info *mvi, u32 addr)
+{
+       void __iomem *regs = mvi->regs;
+       mw32(MVS_CMD_ADDR, addr);
+       return mr32(MVS_CMD_DATA);
+}
+
+static inline void mvs_cw32(struct mvs_info *mvi, u32 addr, u32 val)
+{
+       void __iomem *regs = mvi->regs;
+       mw32(MVS_CMD_ADDR, addr);
+       mw32(MVS_CMD_DATA, val);
+}
+
+static inline u32 mvs_read_phy_ctl(struct mvs_info *mvi, u32 port)
+{
+       void __iomem *regs = mvi->regs;
+       return (port < 4) ? mr32(MVS_P0_SER_CTLSTAT + port * 4) :
+               mr32(MVS_P4_SER_CTLSTAT + (port - 4) * 4);
+}
+
+static inline void mvs_write_phy_ctl(struct mvs_info *mvi, u32 port, u32 val)
+{
+       void __iomem *regs = mvi->regs;
+       if (port < 4)
+               mw32(MVS_P0_SER_CTLSTAT + port * 4, val);
+       else
+               mw32(MVS_P4_SER_CTLSTAT + (port - 4) * 4, val);
+}
+
+static inline u32 mvs_read_port(struct mvs_info *mvi, u32 off,
+                               u32 off2, u32 port)
+{
+       void __iomem *regs = mvi->regs + off;
+       void __iomem *regs2 = mvi->regs + off2;
+       return (port < 4) ? readl(regs + port * 8) :
+               readl(regs2 + (port - 4) * 8);
+}
+
+static inline void mvs_write_port(struct mvs_info *mvi, u32 off, u32 off2,
+                               u32 port, u32 val)
+{
+       void __iomem *regs = mvi->regs + off;
+       void __iomem *regs2 = mvi->regs + off2;
+       if (port < 4)
+               writel(val, regs + port * 8);
+       else
+               writel(val, regs2 + (port - 4) * 8);
+}
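+/*
+ * Editorial note: ports 0-3 and ports 4-7 sit in two separate register
+ * banks (off/off2 above), each with an 8-byte stride per port; the
+ * accessors below just pick the right bank pair, e.g.
+ *
+ *	u32 stat = mvs_read_port(mvi, MVS_P0_INT_STAT,
+ *				 MVS_P4_INT_STAT, port);
+ */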
+
+static inline u32 mvs_read_port_cfg_data(struct mvs_info *mvi, u32 port)
+{
+       return mvs_read_port(mvi, MVS_P0_CFG_DATA,
+                       MVS_P4_CFG_DATA, port);
+}
+
+static inline void mvs_write_port_cfg_data(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_CFG_DATA,
+                       MVS_P4_CFG_DATA, port, val);
+}
+
+static inline void mvs_write_port_cfg_addr(struct mvs_info *mvi,
+                                               u32 port, u32 addr)
+{
+       mvs_write_port(mvi, MVS_P0_CFG_ADDR,
+                       MVS_P4_CFG_ADDR, port, addr);
+       mdelay(10);
+}
+
+static inline u32 mvs_read_port_vsr_data(struct mvs_info *mvi, u32 port)
+{
+       return mvs_read_port(mvi, MVS_P0_VSR_DATA,
+                       MVS_P4_VSR_DATA, port);
+}
+
+static inline void mvs_write_port_vsr_data(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_VSR_DATA,
+                       MVS_P4_VSR_DATA, port, val);
+}
+
+static inline void mvs_write_port_vsr_addr(struct mvs_info *mvi,
+                                               u32 port, u32 addr)
+{
+       mvs_write_port(mvi, MVS_P0_VSR_ADDR,
+                       MVS_P4_VSR_ADDR, port, addr);
+       mdelay(10);
+}
+
+static inline u32 mvs_read_port_irq_stat(struct mvs_info *mvi, u32 port)
+{
+       return mvs_read_port(mvi, MVS_P0_INT_STAT,
+                       MVS_P4_INT_STAT, port);
+}
+
+static inline void mvs_write_port_irq_stat(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_INT_STAT,
+                       MVS_P4_INT_STAT, port, val);
+}
+
+static inline u32 mvs_read_port_irq_mask(struct mvs_info *mvi, u32 port)
+{
+       return mvs_read_port(mvi, MVS_P0_INT_MASK,
+                       MVS_P4_INT_MASK, port);
+}
+
+static inline void mvs_write_port_irq_mask(struct mvs_info *mvi,
+                                               u32 port, u32 val)
+{
+       mvs_write_port(mvi, MVS_P0_INT_MASK,
+                       MVS_P4_INT_MASK, port, val);
+}
+
+static inline void __devinit mvs_phy_hacks(struct mvs_info *mvi)
+{
+       u32 tmp;
+
+       /* workaround for SATA R-ERR, to ignore phy glitch */
+       tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+       tmp &= ~(1 << 9);
+       tmp |= (1 << 10);
+       mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+
+       /* enable retry 127 times */
+       mvs_cw32(mvi, CMD_SAS_CTL1, 0x7f7f);
+
+       /* extend open frame timeout to max */
+       tmp = mvs_cr32(mvi, CMD_SAS_CTL0);
+       tmp &= ~0xffff;
+       tmp |= 0x3fff;
+       mvs_cw32(mvi, CMD_SAS_CTL0, tmp);
+
+       /* workaround for WDTIMEOUT , set to 550 ms */
+       mvs_cw32(mvi, CMD_WD_TIMER, 0x7a0000);
+
+       /* not to halt for different port op during wideport link change */
+       mvs_cw32(mvi, CMD_APP_ERR_CONFIG, 0xffefbf7d);
+
+       /* workaround for Seagate disk not-found OOB sequence, recv
+        * COMINIT before sending out COMWAKE */
+       tmp = mvs_cr32(mvi, CMD_PHY_MODE_21);
+       tmp &= 0x0000ffff;
+       tmp |= 0x00fa0000;
+       mvs_cw32(mvi, CMD_PHY_MODE_21, tmp);
+
+       tmp = mvs_cr32(mvi, CMD_PHY_TIMER);
+       tmp &= 0x1fffffff;
+       tmp |= (2U << 29);      /* 8 ms retry */
+       mvs_cw32(mvi, CMD_PHY_TIMER, tmp);
+}
+
+static inline void mvs_int_sata(struct mvs_info *mvi)
+{
+       u32 tmp;
+       void __iomem *regs = mvi->regs;
+       tmp = mr32(MVS_INT_STAT_SRS_0);
+       if (tmp)
+               mw32(MVS_INT_STAT_SRS_0, tmp);
+       MVS_CHIP_DISP->clear_active_cmds(mvi);
+}
+
+static inline void mvs_int_full(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       u32 tmp, stat;
+       int i;
+
+       stat = mr32(MVS_INT_STAT);
+       mvs_int_rx(mvi, false);
+
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               tmp = (stat >> i) & (CINT_PORT | CINT_PORT_STOPPED);
+               if (tmp)
+                       mvs_int_port(mvi, i, tmp);
+       }
+
+       if (stat & CINT_SRS)
+               mvs_int_sata(mvi);
+
+       mw32(MVS_INT_STAT, stat);
+}
+
+static inline void mvs_start_delivery(struct mvs_info *mvi, u32 tx)
+{
+       void __iomem *regs = mvi->regs;
+       mw32(MVS_TX_PROD_IDX, tx);
+}
+
+static inline u32 mvs_rx_update(struct mvs_info *mvi)
+{
+       void __iomem *regs = mvi->regs;
+       return mr32(MVS_RX_CONS_IDX);
+}
+
+static inline u32 mvs_get_prd_size(void)
+{
+       return sizeof(struct mvs_prd);
+}
+
+static inline u32 mvs_get_prd_count(void)
+{
+       return MAX_SG_ENTRY;
+}
+
+static inline void mvs_show_pcie_usage(struct mvs_info *mvi)
+{
+       u16 link_stat, link_spd;
+       const char *spd[] = {
+               "UnKnown",
+               "2.5",
+               "5.0",
+       };
+       if (mvi->flags & MVF_FLAG_SOC || mvi->id > 0)
+               return;
+
+       pci_read_config_word(mvi->pdev, PCR_LINK_STAT, &link_stat);
+       link_spd = (link_stat & PLS_LINK_SPD) >> PLS_LINK_SPD_OFFS;
+       if (link_spd >= 3)
+               link_spd = 0;
+       dev_printk(KERN_INFO, mvi->dev,
+               "mvsas: PCI-E x%u, Bandwidth Usage: %s Gbps\n",
+              (link_stat & PLS_NEG_LINK_WD) >> PLS_NEG_LINK_WD_OFFS,
+              spd[link_spd]);
+}
+
+static inline u32 mvs_hw_max_link_rate(void)
+{
+       return MAX_LINK_RATE;
+}
+
+#endif  /* _MV_CHIPS_H_ */
+
diff --git a/drivers/scsi/mvsas/mv_defs.h b/drivers/scsi/mvsas/mv_defs.h
new file mode 100644 (file)
index 0000000..f8cb9de
--- /dev/null
@@ -0,0 +1,502 @@
+/*
+ * Marvell 88SE64xx/88SE94xx constants header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_DEFS_H_
+#define _MV_DEFS_H_
+
+
+enum chip_flavors {
+       chip_6320,
+       chip_6440,
+       chip_6485,
+       chip_9480,
+       chip_9180,
+};
+
+/* driver compile-time configuration */
+enum driver_configuration {
+       MVS_SLOTS               = 512,  /* command slots */
+       MVS_TX_RING_SZ          = 1024, /* TX ring size (12-bit) */
+       MVS_RX_RING_SZ          = 1024, /* RX ring size (12-bit) */
+                                       /* software requires power-of-2
+                                          ring size */
+       MVS_SOC_SLOTS           = 64,
+       MVS_SOC_TX_RING_SZ      = MVS_SOC_SLOTS * 2,
+       MVS_SOC_RX_RING_SZ      = MVS_SOC_SLOTS * 2,
+
+       MVS_SLOT_BUF_SZ         = 8192, /* cmd tbl + IU + status + PRD */
+       MVS_SSP_CMD_SZ          = 64,   /* SSP command table buffer size */
+       MVS_ATA_CMD_SZ          = 96,   /* SATA command table buffer size */
+       MVS_OAF_SZ              = 64,   /* Open address frame buffer size */
+       MVS_QUEUE_SIZE          = 32,   /* supported queue depth */
+       MVS_CAN_QUEUE           = MVS_SLOTS - 2,        /* SCSI Queue depth */
+       MVS_SOC_CAN_QUEUE       = MVS_SOC_SLOTS - 2,
+};
+
+/* unchangeable hardware details */
+enum hardware_details {
+       MVS_MAX_PHYS            = 8,    /* max. possible phys */
+       MVS_MAX_PORTS           = 8,    /* max. possible ports */
+       MVS_SOC_PHYS            = 4,    /* soc phys */
+       MVS_SOC_PORTS           = 4,    /* soc ports */
+       MVS_MAX_DEVICES         = 1024, /* max supported devices */
+};
+
+/* peripheral registers (BAR2) */
+enum peripheral_registers {
+       SPI_CTL                 = 0x10, /* EEPROM control */
+       SPI_CMD                 = 0x14, /* EEPROM command */
+       SPI_DATA                = 0x18, /* EEPROM data */
+};
+
+enum peripheral_register_bits {
+       TWSI_RDY                = (1U << 7),    /* EEPROM interface ready */
+       TWSI_RD                 = (1U << 4),    /* EEPROM read access */
+
+       SPI_ADDR_MASK           = 0x3ffff,      /* bits 17:0 */
+};
+
+enum hw_register_bits {
+       /* MVS_GBL_CTL */
+       INT_EN                  = (1U << 1),    /* Global int enable */
+       HBA_RST                 = (1U << 0),    /* HBA reset */
+
+       /* MVS_GBL_INT_STAT */
+       INT_XOR                 = (1U << 4),    /* XOR engine event */
+       INT_SAS_SATA            = (1U << 0),    /* SAS/SATA event */
+
+       /* MVS_GBL_PORT_TYPE */                 /* shl for ports 1-3 */
+       SATA_TARGET             = (1U << 16),   /* port0 SATA target enable */
+       MODE_AUTO_DET_PORT7 = (1U << 15),       /* port0 SAS/SATA autodetect */
+       MODE_AUTO_DET_PORT6 = (1U << 14),
+       MODE_AUTO_DET_PORT5 = (1U << 13),
+       MODE_AUTO_DET_PORT4 = (1U << 12),
+       MODE_AUTO_DET_PORT3 = (1U << 11),
+       MODE_AUTO_DET_PORT2 = (1U << 10),
+       MODE_AUTO_DET_PORT1 = (1U << 9),
+       MODE_AUTO_DET_PORT0 = (1U << 8),
+       MODE_AUTO_DET_EN    =   MODE_AUTO_DET_PORT0 | MODE_AUTO_DET_PORT1 |
+                               MODE_AUTO_DET_PORT2 | MODE_AUTO_DET_PORT3 |
+                               MODE_AUTO_DET_PORT4 | MODE_AUTO_DET_PORT5 |
+                               MODE_AUTO_DET_PORT6 | MODE_AUTO_DET_PORT7,
+       MODE_SAS_PORT7_MASK = (1U << 7),  /* port0 SAS(1), SATA(0) mode */
+       MODE_SAS_PORT6_MASK = (1U << 6),
+       MODE_SAS_PORT5_MASK = (1U << 5),
+       MODE_SAS_PORT4_MASK = (1U << 4),
+       MODE_SAS_PORT3_MASK = (1U << 3),
+       MODE_SAS_PORT2_MASK = (1U << 2),
+       MODE_SAS_PORT1_MASK = (1U << 1),
+       MODE_SAS_PORT0_MASK = (1U << 0),
+       MODE_SAS_SATA   =       MODE_SAS_PORT0_MASK | MODE_SAS_PORT1_MASK |
+                               MODE_SAS_PORT2_MASK | MODE_SAS_PORT3_MASK |
+                               MODE_SAS_PORT4_MASK | MODE_SAS_PORT5_MASK |
+                               MODE_SAS_PORT6_MASK | MODE_SAS_PORT7_MASK,
+
+                               /* SAS_MODE value may be
+                                * dictated (in hw) by values
+                                * of SATA_TARGET & AUTO_DET
+                                */
+
+       /* MVS_TX_CFG */
+       TX_EN                   = (1U << 16),   /* Enable TX */
+       TX_RING_SZ_MASK         = 0xfff,        /* TX ring size, bits 11:0 */
+
+       /* MVS_RX_CFG */
+       RX_EN                   = (1U << 16),   /* Enable RX */
+       RX_RING_SZ_MASK         = 0xfff,        /* RX ring size, bits 11:0 */
+
+       /* MVS_INT_COAL */
+       COAL_EN                 = (1U << 16),   /* Enable int coalescing */
+
+       /* MVS_INT_STAT, MVS_INT_MASK */
+       CINT_I2C                = (1U << 31),   /* I2C event */
+       CINT_SW0                = (1U << 30),   /* software event 0 */
+       CINT_SW1                = (1U << 29),   /* software event 1 */
+       CINT_PRD_BC             = (1U << 28),   /* PRD BC err for read cmd */
+       CINT_DMA_PCIE           = (1U << 27),   /* DMA to PCIE timeout */
+       CINT_MEM                = (1U << 26),   /* int mem parity err */
+       CINT_I2C_SLAVE          = (1U << 25),   /* slave I2C event */
+       CINT_SRS                = (1U << 3),    /* SRS event */
+       CINT_CI_STOP            = (1U << 1),    /* cmd issue stopped */
+       CINT_DONE               = (1U << 0),    /* cmd completion */
+
+                                               /* shl for ports 1-3 */
+       CINT_PORT_STOPPED       = (1U << 16),   /* port0 stopped */
+       CINT_PORT               = (1U << 8),    /* port0 event */
+       CINT_PORT_MASK_OFFSET   = 8,
+       CINT_PORT_MASK          = (0xFF << CINT_PORT_MASK_OFFSET),
+       CINT_PHY_MASK_OFFSET    = 4,
+       CINT_PHY_MASK           = (0x0F << CINT_PHY_MASK_OFFSET),
+
+       /* TX (delivery) ring bits */
+       TXQ_CMD_SHIFT           = 29,
+       TXQ_CMD_SSP             = 1,            /* SSP protocol */
+       TXQ_CMD_SMP             = 2,            /* SMP protocol */
+       TXQ_CMD_STP             = 3,            /* STP/SATA protocol */
+       TXQ_CMD_SSP_FREE_LIST   = 4,            /* add to SSP targ free list */
+       TXQ_CMD_SLOT_RESET      = 7,            /* reset command slot */
+       TXQ_MODE_I              = (1U << 28),   /* mode: 0=target,1=initiator */
+       TXQ_MODE_TARGET         = 0,
+       TXQ_MODE_INITIATOR      = 1,
+       TXQ_PRIO_HI             = (1U << 27),   /* priority: 0=normal, 1=high */
+       TXQ_PRI_NORMAL          = 0,
+       TXQ_PRI_HIGH            = 1,
+       TXQ_SRS_SHIFT           = 20,           /* SATA register set */
+       TXQ_SRS_MASK            = 0x7f,
+       TXQ_PHY_SHIFT           = 12,           /* PHY bitmap */
+       TXQ_PHY_MASK            = 0xff,
+       TXQ_SLOT_MASK           = 0xfff,        /* slot number */
+
+       /* RX (completion) ring bits */
+       RXQ_GOOD                = (1U << 23),   /* Response good */
+       RXQ_SLOT_RESET          = (1U << 21),   /* Slot reset complete */
+       RXQ_CMD_RX              = (1U << 20),   /* target cmd received */
+       RXQ_ATTN                = (1U << 19),   /* attention */
+       RXQ_RSP                 = (1U << 18),   /* response frame xfer'd */
+       RXQ_ERR                 = (1U << 17),   /* err info rec xfer'd */
+       RXQ_DONE                = (1U << 16),   /* cmd complete */
+       RXQ_SLOT_MASK           = 0xfff,        /* slot number */
+
+       /* mvs_cmd_hdr bits */
+       MCH_PRD_LEN_SHIFT       = 16,           /* 16-bit PRD table len */
+       MCH_SSP_FR_TYPE_SHIFT   = 13,           /* SSP frame type */
+
+                                               /* SSP initiator only */
+       MCH_SSP_FR_CMD          = 0x0,          /* COMMAND frame */
+
+                                               /* SSP initiator or target */
+       MCH_SSP_FR_TASK         = 0x1,          /* TASK frame */
+
+                                               /* SSP target only */
+       MCH_SSP_FR_XFER_RDY     = 0x4,          /* XFER_RDY frame */
+       MCH_SSP_FR_RESP         = 0x5,          /* RESPONSE frame */
+       MCH_SSP_FR_READ         = 0x6,          /* Read DATA frame(s) */
+       MCH_SSP_FR_READ_RESP    = 0x7,          /* ditto, plus RESPONSE */
+
+       MCH_SSP_MODE_PASSTHRU   = 1,
+       MCH_SSP_MODE_NORMAL     = 0,
+       MCH_PASSTHRU            = (1U << 12),   /* pass-through (SSP) */
+       MCH_FBURST              = (1U << 11),   /* first burst (SSP) */
+       MCH_CHK_LEN             = (1U << 10),   /* chk xfer len (SSP) */
+       MCH_RETRY               = (1U << 9),    /* tport layer retry (SSP) */
+       MCH_PROTECTION          = (1U << 8),    /* protection info rec (SSP) */
+       MCH_RESET               = (1U << 7),    /* Reset (STP/SATA) */
+       MCH_FPDMA               = (1U << 6),    /* First party DMA (STP/SATA) */
+       MCH_ATAPI               = (1U << 5),    /* ATAPI (STP/SATA) */
+       MCH_BIST                = (1U << 4),    /* BIST activate (STP/SATA) */
+       MCH_PMP_MASK            = 0xf,          /* PMP from cmd FIS (STP/SATA)*/
+
+       CCTL_RST                = (1U << 5),    /* port logic reset */
+
+                                               /* 0(LSB first), 1(MSB first) */
+       CCTL_ENDIAN_DATA        = (1U << 3),    /* PRD data */
+       CCTL_ENDIAN_RSP         = (1U << 2),    /* response frame */
+       CCTL_ENDIAN_OPEN        = (1U << 1),    /* open address frame */
+       CCTL_ENDIAN_CMD         = (1U << 0),    /* command table */
+
+       /* MVS_Px_SER_CTLSTAT (per-phy control) */
+       PHY_SSP_RST             = (1U << 3),    /* reset SSP link layer */
+       PHY_BCAST_CHG           = (1U << 2),    /* broadcast(change) notif */
+       PHY_RST_HARD            = (1U << 1),    /* hard reset + phy reset */
+       PHY_RST                 = (1U << 0),    /* phy reset */
+       PHY_READY_MASK          = (1U << 20),
+
+       /* MVS_Px_INT_STAT, MVS_Px_INT_MASK (per-phy events) */
+       PHYEV_DEC_ERR           = (1U << 24),   /* Phy Decoding Error */
+       PHYEV_DCDR_ERR          = (1U << 23),   /* STP Decoder Error */
+       PHYEV_CRC_ERR           = (1U << 22),   /* STP CRC Error */
+       PHYEV_UNASSOC_FIS       = (1U << 19),   /* unassociated FIS rx'd */
+       PHYEV_AN                = (1U << 18),   /* SATA async notification */
+       PHYEV_BIST_ACT          = (1U << 17),   /* BIST activate FIS */
+       PHYEV_SIG_FIS           = (1U << 16),   /* signature FIS */
+       PHYEV_POOF              = (1U << 12),   /* phy ready from 1 -> 0 */
+       PHYEV_IU_BIG            = (1U << 11),   /* IU too long err */
+       PHYEV_IU_SMALL          = (1U << 10),   /* IU too short err */
+       PHYEV_UNK_TAG           = (1U << 9),    /* unknown tag */
+       PHYEV_BROAD_CH          = (1U << 8),    /* broadcast(CHANGE) */
+       PHYEV_COMWAKE           = (1U << 7),    /* COMWAKE rx'd */
+       PHYEV_PORT_SEL          = (1U << 6),    /* port selector present */
+       PHYEV_HARD_RST          = (1U << 5),    /* hard reset rx'd */
+       PHYEV_ID_TMOUT          = (1U << 4),    /* identify timeout */
+       PHYEV_ID_FAIL           = (1U << 3),    /* identify failed */
+       PHYEV_ID_DONE           = (1U << 2),    /* identify done */
+       PHYEV_HARD_RST_DONE     = (1U << 1),    /* hard reset done */
+       PHYEV_RDY_CH            = (1U << 0),    /* phy ready changed state */
+
+       /* MVS_PCS */
+       PCS_EN_SATA_REG_SHIFT   = (16),         /* Enable SATA Register Set */
+       PCS_EN_PORT_XMT_SHIFT   = (12),         /* Enable Port Transmit */
+       PCS_EN_PORT_XMT_SHIFT2  = (8),          /* For 6485 */
+       PCS_SATA_RETRY          = (1U << 8),    /* retry ctl FIS on R_ERR */
+       PCS_RSP_RX_EN           = (1U << 7),    /* raw response rx */
+       PCS_SATA_RETRY_2        = (1U << 6),    /* For 9180 */
+       PCS_SELF_CLEAR          = (1U << 5),    /* self-clearing int mode */
+       PCS_FIS_RX_EN           = (1U << 4),    /* FIS rx enable */
+       PCS_CMD_STOP_ERR        = (1U << 3),    /* cmd stop-on-err enable */
+       PCS_CMD_RST             = (1U << 1),    /* reset cmd issue */
+       PCS_CMD_EN              = (1U << 0),    /* enable cmd issue */
+
+       /* Port n Attached Device Info */
+       PORT_DEV_SSP_TRGT       = (1U << 19),
+       PORT_DEV_SMP_TRGT       = (1U << 18),
+       PORT_DEV_STP_TRGT       = (1U << 17),
+       PORT_DEV_SSP_INIT       = (1U << 11),
+       PORT_DEV_SMP_INIT       = (1U << 10),
+       PORT_DEV_STP_INIT       = (1U << 9),
+       PORT_PHY_ID_MASK        = (0xFFU << 24),
+       PORT_SSP_TRGT_MASK      = (0x1U << 19),
+       PORT_SSP_INIT_MASK      = (0x1U << 11),
+       PORT_DEV_TRGT_MASK      = (0x7U << 17),
+       PORT_DEV_INIT_MASK      = (0x7U << 9),
+       PORT_DEV_TYPE_MASK      = (0x7U << 0),
+
+       /* Port n PHY Status */
+       PHY_RDY                 = (1U << 2),
+       PHY_DW_SYNC             = (1U << 1),
+       PHY_OOB_DTCTD           = (1U << 0),
+
+       /* VSR */
+       /* PHYMODE 6 (CDB) */
+       PHY_MODE6_LATECLK       = (1U << 29),   /* Lock Clock */
+       PHY_MODE6_DTL_SPEED     = (1U << 27),   /* Digital Loop Speed */
+       PHY_MODE6_FC_ORDER      = (1U << 26),   /* Fibre Channel Mode Order*/
+       PHY_MODE6_MUCNT_EN      = (1U << 24),   /* u Count Enable */
+       PHY_MODE6_SEL_MUCNT_LEN = (1U << 22),   /* Training Length Select */
+       PHY_MODE6_SELMUPI       = (1U << 20),   /* Phase Multi Select (init) */
+       PHY_MODE6_SELMUPF       = (1U << 18),   /* Phase Multi Select (final) */
+       PHY_MODE6_SELMUFF       = (1U << 16),   /* Freq Loop Multi Sel(final) */
+       PHY_MODE6_SELMUFI       = (1U << 14),   /* Freq Loop Multi Sel(init) */
+       PHY_MODE6_FREEZE_LOOP   = (1U << 12),   /* Freeze Rx CDR Loop */
+       PHY_MODE6_INT_RXFOFFS   = (1U << 3),    /* Rx CDR Freq Loop Enable */
+       PHY_MODE6_FRC_RXFOFFS   = (1U << 2),    /* Initial Rx CDR Offset */
+       PHY_MODE6_STAU_0D8      = (1U << 1),    /* Rx CDR Freq Loop Saturate */
+       PHY_MODE6_RXSAT_DIS     = (1U << 0),    /* Saturate Ctl */
+};
+
+/* SAS/SATA configuration port registers, aka phy registers */
+enum sas_sata_config_port_regs {
+       PHYR_IDENTIFY           = 0x00, /* info for IDENTIFY frame */
+       PHYR_ADDR_LO            = 0x04, /* my SAS address (low) */
+       PHYR_ADDR_HI            = 0x08, /* my SAS address (high) */
+       PHYR_ATT_DEV_INFO       = 0x0C, /* attached device info */
+       PHYR_ATT_ADDR_LO        = 0x10, /* attached dev SAS addr (low) */
+       PHYR_ATT_ADDR_HI        = 0x14, /* attached dev SAS addr (high) */
+       PHYR_SATA_CTL           = 0x18, /* SATA control */
+       PHYR_PHY_STAT           = 0x1C, /* PHY status */
+       PHYR_SATA_SIG0  = 0x20, /*port SATA signature FIS(Byte 0-3) */
+       PHYR_SATA_SIG1  = 0x24, /*port SATA signature FIS(Byte 4-7) */
+       PHYR_SATA_SIG2  = 0x28, /*port SATA signature FIS(Byte 8-11) */
+       PHYR_SATA_SIG3  = 0x2c, /*port SATA signature FIS(Byte 12-15) */
+       PHYR_R_ERR_COUNT        = 0x30, /* port R_ERR count register */
+       PHYR_CRC_ERR_COUNT      = 0x34, /* port CRC error count register */
+       PHYR_WIDE_PORT  = 0x38, /* wide port participating */
+       PHYR_CURRENT0           = 0x80, /* current connection info 0 */
+       PHYR_CURRENT1           = 0x84, /* current connection info 1 */
+       PHYR_CURRENT2           = 0x88, /* current connection info 2 */
+       CONFIG_ID_FRAME0       = 0x100, /* Port device ID frame register 0 */
+       CONFIG_ID_FRAME1       = 0x104, /* Port device ID frame register 1 */
+       CONFIG_ID_FRAME2       = 0x108, /* Port device ID frame register 2 */
+       CONFIG_ID_FRAME3       = 0x10c, /* Port device ID frame register 3 */
+       CONFIG_ID_FRAME4       = 0x110, /* Port device ID frame register 4 */
+       CONFIG_ID_FRAME5       = 0x114, /* Port device ID frame register 5 */
+       CONFIG_ID_FRAME6       = 0x118, /* Port device ID frame register 6 */
+       CONFIG_ATT_ID_FRAME0   = 0x11c, /* attached ID frame register 0 */
+       CONFIG_ATT_ID_FRAME1   = 0x120, /* attached ID frame register 1 */
+       CONFIG_ATT_ID_FRAME2   = 0x124, /* attached ID frame register 2 */
+       CONFIG_ATT_ID_FRAME3   = 0x128, /* attached ID frame register 3 */
+       CONFIG_ATT_ID_FRAME4   = 0x12c, /* attached ID frame register 4 */
+       CONFIG_ATT_ID_FRAME5   = 0x130, /* attached ID frame register 5 */
+       CONFIG_ATT_ID_FRAME6   = 0x134, /* attached ID frame register 6 */
+};
+
+enum sas_cmd_port_registers {
+       CMD_CMRST_OOB_DET       = 0x100, /* COMRESET OOB detect register */
+       CMD_CMWK_OOB_DET        = 0x104, /* COMWAKE OOB detect register */
+       CMD_CMSAS_OOB_DET       = 0x108, /* COMSAS OOB detect register */
+       CMD_BRST_OOB_DET        = 0x10c, /* burst OOB detect register */
+       CMD_OOB_SPACE   = 0x110, /* OOB space control register */
+       CMD_OOB_BURST   = 0x114, /* OOB burst control register */
+       CMD_PHY_TIMER           = 0x118, /* PHY timer control register */
+       CMD_PHY_CONFIG0 = 0x11c, /* PHY config register 0 */
+       CMD_PHY_CONFIG1 = 0x120, /* PHY config register 1 */
+       CMD_SAS_CTL0            = 0x124, /* SAS control register 0 */
+       CMD_SAS_CTL1            = 0x128, /* SAS control register 1 */
+       CMD_SAS_CTL2            = 0x12c, /* SAS control register 2 */
+       CMD_SAS_CTL3            = 0x130, /* SAS control register 3 */
+       CMD_ID_TEST             = 0x134, /* ID test register */
+       CMD_PL_TIMER            = 0x138, /* PL timer register */
+       CMD_WD_TIMER            = 0x13c, /* WD timer register */
+       CMD_PORT_SEL_COUNT      = 0x140, /* port selector count register */
+       CMD_APP_MEM_CTL = 0x144, /* Application Memory Control */
+       CMD_XOR_MEM_CTL = 0x148, /* XOR Block Memory Control */
+       CMD_DMA_MEM_CTL = 0x14c, /* DMA Block Memory Control */
+       CMD_PORT_MEM_CTL0       = 0x150, /* Port Memory Control 0 */
+       CMD_PORT_MEM_CTL1       = 0x154, /* Port Memory Control 1 */
+       CMD_SATA_PORT_MEM_CTL0  = 0x158, /* SATA Port Memory Control 0 */
+       CMD_SATA_PORT_MEM_CTL1  = 0x15c, /* SATA Port Memory Control 1 */
+       CMD_XOR_MEM_BIST_CTL    = 0x160, /* XOR Memory BIST Control */
+       CMD_XOR_MEM_BIST_STAT   = 0x164, /* XOR Memory BIST Status */
+       CMD_DMA_MEM_BIST_CTL    = 0x168, /* DMA Memory BIST Control */
+       CMD_DMA_MEM_BIST_STAT   = 0x16c, /* DMA Memory BIST Status */
+       CMD_PORT_MEM_BIST_CTL   = 0x170, /* Port Memory BIST Control */
+       CMD_PORT_MEM_BIST_STAT0 = 0x174, /* Port Memory BIST Status 0 */
+       CMD_PORT_MEM_BIST_STAT1 = 0x178, /* Port Memory BIST Status 1 */
+       CMD_STP_MEM_BIST_CTL    = 0x17c, /* STP Memory BIST Control */
+       CMD_STP_MEM_BIST_STAT0  = 0x180, /* STP Memory BIST Status 0 */
+       CMD_STP_MEM_BIST_STAT1  = 0x184, /* STP Memory BIST Status 1 */
+       CMD_RESET_COUNT         = 0x188, /* Reset Count */
+       CMD_MONTR_DATA_SEL      = 0x18C, /* Monitor Data/Select */
+       CMD_PLL_PHY_CONFIG      = 0x190, /* PLL/PHY Configuration */
+       CMD_PHY_CTL             = 0x194, /* PHY Control and Status */
+       CMD_PHY_TEST_COUNT0     = 0x198, /* Phy Test Count 0 */
+       CMD_PHY_TEST_COUNT1     = 0x19C, /* Phy Test Count 1 */
+       CMD_PHY_TEST_COUNT2     = 0x1A0, /* Phy Test Count 2 */
+       CMD_APP_ERR_CONFIG      = 0x1A4, /* Application Error Configuration */
+       CMD_PND_FIFO_CTL0       = 0x1A8, /* Pending FIFO Control 0 */
+       CMD_HOST_CTL            = 0x1AC, /* Host Control Status */
+       CMD_HOST_WR_DATA        = 0x1B0, /* Host Write Data */
+       CMD_HOST_RD_DATA        = 0x1B4, /* Host Read Data */
+       CMD_PHY_MODE_21         = 0x1B8, /* Phy Mode 21 */
+       CMD_SL_MODE0            = 0x1BC, /* SL Mode 0 */
+       CMD_SL_MODE1            = 0x1C0, /* SL Mode 1 */
+       CMD_PND_FIFO_CTL1       = 0x1C4, /* Pending FIFO Control 1 */
+};
+
+enum mvs_info_flags {
+       MVF_MSI         = (1U << 0),    /* MSI is enabled */
+       MVF_PHY_PWR_FIX = (1U << 1),    /* bug workaround */
+       MVF_FLAG_SOC            = (1U << 2),    /* SoC integrated controllers */
+};
+
+enum mvs_event_flags {
+       PHY_PLUG_EVENT  = (3U),
+       PHY_PLUG_IN             = (1U << 0),    /* phy plug in */
+       PHY_PLUG_OUT            = (1U << 1),    /* phy plug out */
+};
+
+enum mvs_port_type {
+       PORT_TGT_MASK   =  (1U << 5),
+       PORT_INIT_PORT  =  (1U << 4),
+       PORT_TGT_PORT   =  (1U << 3),
+       PORT_INIT_TGT_PORT = (PORT_INIT_PORT | PORT_TGT_PORT),
+       PORT_TYPE_SAS   =  (1U << 1),
+       PORT_TYPE_SATA  =  (1U << 0),
+};
+
+/* Command Table Format */
+enum ct_format {
+       /* SSP */
+       SSP_F_H         =  0x00,
+       SSP_F_IU        =  0x18,
+       SSP_F_MAX       =  0x4D,
+       /* STP */
+       STP_CMD_FIS     =  0x00,
+       STP_ATAPI_CMD   =  0x40,
+       STP_F_MAX       =  0x10,
+       /* SMP */
+       SMP_F_T         =  0x00,
+       SMP_F_DEP       =  0x01,
+       SMP_F_MAX       =  0x101,
+};
+
+enum status_buffer {
+       SB_EIR_OFF      =  0x00,        /* Error Information Record */
+       SB_RFB_OFF      =  0x08,        /* Response Frame Buffer */
+       SB_RFB_MAX      =  0x400,       /* RFB size*/
+};
+
+enum error_info_rec {
+       CMD_ISS_STPD    = (1U << 31),   /* Cmd Issue Stopped */
+       CMD_PI_ERR      = (1U << 30),   /* Protection info error.  see flags2 */
+       RSP_OVER        = (1U << 29),   /* rsp buffer overflow */
+       RETRY_LIM       = (1U << 28),   /* FIS/frame retry limit exceeded */
+       UNK_FIS         = (1U << 27),   /* unknown FIS */
+       DMA_TERM        = (1U << 26),   /* DMA terminate primitive rx'd */
+       SYNC_ERR        = (1U << 25),   /* SYNC rx'd during frame xmit */
+       TFILE_ERR       = (1U << 24),   /* SATA taskfile Error bit set */
+       R_ERR           = (1U << 23),   /* SATA returned R_ERR prim */
+       RD_OFS          = (1U << 20),   /* Read DATA frame invalid offset */
+       XFER_RDY_OFS    = (1U << 19),   /* XFER_RDY offset error */
+       UNEXP_XFER_RDY  = (1U << 18),   /* unexpected XFER_RDY error */
+       DATA_OVER_UNDER = (1U << 16),   /* data overflow/underflow */
+       INTERLOCK       = (1U << 15),   /* interlock error */
+       NAK             = (1U << 14),   /* NAK rx'd */
+       ACK_NAK_TO      = (1U << 13),   /* ACK/NAK timeout */
+       CXN_CLOSED      = (1U << 12),   /* cxn closed w/out ack/nak */
+       OPEN_TO         = (1U << 11),   /* I_T nexus lost, open cxn timeout */
+       PATH_BLOCKED    = (1U << 10),   /* I_T nexus lost, pathway blocked */
+       NO_DEST         = (1U << 9),    /* I_T nexus lost, no destination */
+       STP_RES_BSY     = (1U << 8),    /* STP resources busy */
+       BREAK           = (1U << 7),    /* break received */
+       BAD_DEST        = (1U << 6),    /* bad destination */
+       BAD_PROTO       = (1U << 5),    /* protocol not supported */
+       BAD_RATE        = (1U << 4),    /* cxn rate not supported */
+       WRONG_DEST      = (1U << 3),    /* wrong destination error */
+       CREDIT_TO       = (1U << 2),    /* credit timeout */
+       WDOG_TO         = (1U << 1),    /* watchdog timeout */
+       BUF_PAR         = (1U << 0),    /* buffer parity error */
+};
+
+enum error_info_rec_2 {
+       SLOT_BSY_ERR    = (1U << 31),   /* Slot Busy Error */
+       GRD_CHK_ERR     = (1U << 14),   /* Guard Check Error */
+       APP_CHK_ERR     = (1U << 13),   /* Application Check error */
+       REF_CHK_ERR     = (1U << 12),   /* Reference Check Error */
+       USR_BLK_NM      = (1U << 0),    /* User Block Number */
+};
+
+enum pci_cfg_register_bits {
+       PCTL_PWR_OFF    = (0xFU << 24),
+       PCTL_COM_ON     = (0xFU << 20),
+       PCTL_LINK_RST   = (0xFU << 16),
+       PCTL_LINK_OFFS  = (16),
+       PCTL_PHY_DSBL   = (0xFU << 12),
+       PCTL_PHY_DSBL_OFFS      = (12),
+       PRD_REQ_SIZE    = (0x4000),
+       PRD_REQ_MASK    = (0x00007000),
+       PLS_NEG_LINK_WD         = (0x3FU << 4),
+       PLS_NEG_LINK_WD_OFFS    = 4,
+       PLS_LINK_SPD            = (0x0FU << 0),
+       PLS_LINK_SPD_OFFS       = 0,
+};
+
+enum open_frame_protocol {
+       PROTOCOL_SMP    = 0x0,
+       PROTOCOL_SSP    = 0x1,
+       PROTOCOL_STP    = 0x2,
+};
+
+/* define for response frame datapres field */
+enum datapres_field {
+       NO_DATA         = 0,
+       RESPONSE_DATA   = 1,
+       SENSE_DATA      = 2,
+};
+
+/* define task management IU */
+struct mvs_tmf_task {
+       u8 tmf;
+       u16 tag_of_task_to_be_managed;
+};
+#endif
diff --git a/drivers/scsi/mvsas/mv_init.c b/drivers/scsi/mvsas/mv_init.c
new file mode 100644 (file)
index 0000000..8646a19
--- /dev/null
@@ -0,0 +1,703 @@
+/*
+ * Marvell 88SE64xx/88SE94xx pci init
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+
+#include "mv_sas.h"
+
+static struct scsi_transport_template *mvs_stt;
+static const struct mvs_chip_info mvs_chips[] = {
+       [chip_6320] =   { 1, 2, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
+       [chip_6440] =   { 1, 4, 0x400, 17, 16,  9, &mvs_64xx_dispatch, },
+       [chip_6485] =   { 1, 8, 0x800, 33, 32, 10, &mvs_64xx_dispatch, },
+       [chip_9180] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
+       [chip_9480] =   { 2, 4, 0x800, 17, 64,  9, &mvs_94xx_dispatch, },
+};
+
+#define SOC_SAS_NUM 2
+
+static struct scsi_host_template mvs_sht = {
+       .module                 = THIS_MODULE,
+       .name                   = DRV_NAME,
+       .queuecommand           = sas_queuecommand,
+       .target_alloc           = sas_target_alloc,
+       .slave_configure        = mvs_slave_configure,
+       .slave_destroy          = sas_slave_destroy,
+       .scan_finished          = mvs_scan_finished,
+       .scan_start             = mvs_scan_start,
+       .change_queue_depth     = sas_change_queue_depth,
+       .change_queue_type      = sas_change_queue_type,
+       .bios_param             = sas_bios_param,
+       .can_queue              = 1,
+       .cmd_per_lun            = 1,
+       .this_id                = -1,
+       .sg_tablesize           = SG_ALL,
+       .max_sectors            = SCSI_DEFAULT_MAX_SECTORS,
+       .use_clustering         = ENABLE_CLUSTERING,
+       .eh_device_reset_handler        = sas_eh_device_reset_handler,
+       .eh_bus_reset_handler   = sas_eh_bus_reset_handler,
+       .slave_alloc            = mvs_slave_alloc,
+       .target_destroy         = sas_target_destroy,
+       .ioctl                  = sas_ioctl,
+};
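+/* Editorial note: can_queue and cmd_per_lun start out as placeholders (1)
+ * and are raised to the real limits in mvs_post_sas_ha_init() once the
+ * number of hosts and phys is known. */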
+
+static struct sas_domain_function_template mvs_transport_ops = {
+       .lldd_dev_found         = mvs_dev_found,
+       .lldd_dev_gone  = mvs_dev_gone,
+
+       .lldd_execute_task      = mvs_queue_command,
+       .lldd_control_phy       = mvs_phy_control,
+
+       .lldd_abort_task        = mvs_abort_task,
+       .lldd_abort_task_set    = mvs_abort_task_set,
+       .lldd_clear_aca         = mvs_clear_aca,
+       .lldd_clear_task_set    = mvs_clear_task_set,
+       .lldd_I_T_nexus_reset   = mvs_I_T_nexus_reset,
+       .lldd_lu_reset          = mvs_lu_reset,
+       .lldd_query_task        = mvs_query_task,
+
+       .lldd_port_formed       = mvs_port_formed,
+       .lldd_port_deformed     = mvs_port_deformed,
+};
+
+static void __devinit mvs_phy_init(struct mvs_info *mvi, int phy_id)
+{
+       struct mvs_phy *phy = &mvi->phy[phy_id];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+       phy->mvi = mvi;
+       init_timer(&phy->timer);
+       sas_phy->enabled = (phy_id < mvi->chip->n_phy) ? 1 : 0;
+       sas_phy->class = SAS;
+       sas_phy->iproto = SAS_PROTOCOL_ALL;
+       sas_phy->tproto = 0;
+       sas_phy->type = PHY_TYPE_PHYSICAL;
+       sas_phy->role = PHY_ROLE_INITIATOR;
+       sas_phy->oob_mode = OOB_NOT_CONNECTED;
+       sas_phy->linkrate = SAS_LINK_RATE_UNKNOWN;
+
+       sas_phy->id = phy_id;
+       sas_phy->sas_addr = &mvi->sas_addr[0];
+       sas_phy->frame_rcvd = &phy->frame_rcvd[0];
+       sas_phy->ha = (struct sas_ha_struct *)mvi->shost->hostdata;
+       sas_phy->lldd_phy = phy;
+}
+
+static void mvs_free(struct mvs_info *mvi)
+{
+       int i;
+       struct mvs_wq *mwq;
+       int slot_nr;
+
+       if (!mvi)
+               return;
+
+       if (mvi->flags & MVF_FLAG_SOC)
+               slot_nr = MVS_SOC_SLOTS;
+       else
+               slot_nr = MVS_SLOTS;
+
+       for (i = 0; i < mvi->tags_num; i++) {
+               struct mvs_slot_info *slot = &mvi->slot_info[i];
+               if (slot->buf)
+                       dma_free_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+                                         slot->buf, slot->buf_dma);
+       }
+
+       if (mvi->tx)
+               dma_free_coherent(mvi->dev,
+                                 sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+                                 mvi->tx, mvi->tx_dma);
+       if (mvi->rx_fis)
+               dma_free_coherent(mvi->dev, MVS_RX_FISL_SZ,
+                                 mvi->rx_fis, mvi->rx_fis_dma);
+       if (mvi->rx)
+               dma_free_coherent(mvi->dev,
+                                 sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+                                 mvi->rx, mvi->rx_dma);
+       if (mvi->slot)
+               dma_free_coherent(mvi->dev,
+                                 sizeof(*mvi->slot) * slot_nr,
+                                 mvi->slot, mvi->slot_dma);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       if (mvi->bulk_buffer)
+               dma_free_coherent(mvi->dev, TRASH_BUCKET_SIZE,
+                                 mvi->bulk_buffer, mvi->bulk_buffer_dma);
+#endif
+
+       MVS_CHIP_DISP->chip_iounmap(mvi);
+       if (mvi->shost)
+               scsi_host_put(mvi->shost);
+       list_for_each_entry(mwq, &mvi->wq_list, entry)
+               cancel_delayed_work(&mwq->work_q);
+       kfree(mvi);
+}
+
+#ifdef MVS_USE_TASKLET
+struct tasklet_struct  mv_tasklet;
+static void mvs_tasklet(unsigned long opaque)
+{
+       unsigned long flags;
+       u32 stat;
+       u16 core_nr, i = 0;
+
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = (struct sas_ha_struct *)opaque;
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+       BUG_ON(!mvi);
+
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               stat = MVS_CHIP_DISP->isr_status(mvi, mvi->irq);
+               if (stat)
+                       MVS_CHIP_DISP->isr(mvi, mvi->irq, stat);
+       }
+}
+#endif
+
+static irqreturn_t mvs_interrupt(int irq, void *opaque)
+{
+       u32 core_nr, i = 0;
+       u32 stat;
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = opaque;
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+       if (unlikely(!mvi))
+               return IRQ_NONE;
+
+       stat = MVS_CHIP_DISP->isr_status(mvi, irq);
+       if (!stat)
+               return IRQ_NONE;
+
+#ifdef MVS_USE_TASKLET
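+       /* defer the real work to softirq context; mvs_tasklet() above
+        * re-reads the status and runs the per-core ISR for every host */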
+       tasklet_schedule(&mv_tasklet);
+#else
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               MVS_CHIP_DISP->isr(mvi, irq, stat);
+       }
+#endif
+       return IRQ_HANDLED;
+}
+
+static int __devinit mvs_alloc(struct mvs_info *mvi, struct Scsi_Host *shost)
+{
+       int i, slot_nr;
+
+       if (mvi->flags & MVF_FLAG_SOC)
+               slot_nr = MVS_SOC_SLOTS;
+       else
+               slot_nr = MVS_SLOTS;
+
+       spin_lock_init(&mvi->lock);
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               mvs_phy_init(mvi, i);
+               mvi->port[i].wide_port_phymap = 0;
+               mvi->port[i].port_attached = 0;
+               INIT_LIST_HEAD(&mvi->port[i].list);
+       }
+       for (i = 0; i < MVS_MAX_DEVICES; i++) {
+               mvi->devices[i].taskfileset = MVS_ID_NOT_MAPPED;
+               mvi->devices[i].dev_type = NO_DEVICE;
+               mvi->devices[i].device_id = i;
+               mvi->devices[i].dev_status = MVS_DEV_NORMAL;
+       }
+
+       /*
+        * alloc and init our DMA areas
+        */
+       mvi->tx = dma_alloc_coherent(mvi->dev,
+                                    sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ,
+                                    &mvi->tx_dma, GFP_KERNEL);
+       if (!mvi->tx)
+               goto err_out;
+       memset(mvi->tx, 0, sizeof(*mvi->tx) * MVS_CHIP_SLOT_SZ);
+       mvi->rx_fis = dma_alloc_coherent(mvi->dev, MVS_RX_FISL_SZ,
+                                        &mvi->rx_fis_dma, GFP_KERNEL);
+       if (!mvi->rx_fis)
+               goto err_out;
+       memset(mvi->rx_fis, 0, MVS_RX_FISL_SZ);
+
+       mvi->rx = dma_alloc_coherent(mvi->dev,
+                                    sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1),
+                                    &mvi->rx_dma, GFP_KERNEL);
+       if (!mvi->rx)
+               goto err_out;
+       memset(mvi->rx, 0, sizeof(*mvi->rx) * (MVS_RX_RING_SZ + 1));
+       mvi->rx[0] = cpu_to_le32(0xfff);
+       mvi->rx_cons = 0xfff;
+
+       mvi->slot = dma_alloc_coherent(mvi->dev,
+                                      sizeof(*mvi->slot) * slot_nr,
+                                      &mvi->slot_dma, GFP_KERNEL);
+       if (!mvi->slot)
+               goto err_out;
+       memset(mvi->slot, 0, sizeof(*mvi->slot) * slot_nr);
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       mvi->bulk_buffer = dma_alloc_coherent(mvi->dev,
+                                      TRASH_BUCKET_SIZE,
+                                      &mvi->bulk_buffer_dma, GFP_KERNEL);
+       if (!mvi->bulk_buffer)
+               goto err_out;
+#endif
+       for (i = 0; i < slot_nr; i++) {
+               struct mvs_slot_info *slot = &mvi->slot_info[i];
+
+               slot->buf = dma_alloc_coherent(mvi->dev, MVS_SLOT_BUF_SZ,
+                                              &slot->buf_dma, GFP_KERNEL);
+               if (!slot->buf) {
+                       printk(KERN_DEBUG "failed to allocate slot->buf.\n");
+                       goto err_out;
+               }
+               memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+               ++mvi->tags_num;
+       }
+       /* Initialize tags */
+       mvs_tag_init(mvi);
+       return 0;
+err_out:
+       return 1;
+}
+
+
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex)
+{
+       unsigned long res_start, res_len, res_flag, res_flag_ex = 0;
+       struct pci_dev *pdev = mvi->pdev;
+       if (bar_ex != -1) {
+               /*
+                * ioremap main and peripheral registers
+                */
+               res_start = pci_resource_start(pdev, bar_ex);
+               res_len = pci_resource_len(pdev, bar_ex);
+               if (!res_start || !res_len)
+                       goto err_out;
+
+               res_flag_ex = pci_resource_flags(pdev, bar_ex);
+               if (res_flag_ex & IORESOURCE_MEM) {
+                       if (res_flag_ex & IORESOURCE_CACHEABLE)
+                               mvi->regs_ex = ioremap(res_start, res_len);
+                       else
+                               mvi->regs_ex = ioremap_nocache(res_start,
+                                               res_len);
+               } else
+                       mvi->regs_ex = (void *)res_start;
+               if (!mvi->regs_ex)
+                       goto err_out;
+       }
+
+       res_start = pci_resource_start(pdev, bar);
+       res_len = pci_resource_len(pdev, bar);
+       if (!res_start || !res_len)
+               goto err_out;
+
+       res_flag = pci_resource_flags(pdev, bar);
+       if (res_flag & IORESOURCE_CACHEABLE)
+               mvi->regs = ioremap(res_start, res_len);
+       else
+               mvi->regs = ioremap_nocache(res_start, res_len);
+
+       if (!mvi->regs) {
+               if (mvi->regs_ex && (res_flag_ex & IORESOURCE_MEM))
+                       iounmap(mvi->regs_ex);
+               mvi->regs_ex = NULL;
+               goto err_out;
+       }
+
+       return 0;
+err_out:
+       return -1;
+}
+
+void mvs_iounmap(void __iomem *regs)
+{
+       iounmap(regs);
+}
+
+static struct mvs_info *__devinit mvs_pci_alloc(struct pci_dev *pdev,
+                               const struct pci_device_id *ent,
+                               struct Scsi_Host *shost, unsigned int id)
+{
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+       mvi = kzalloc(sizeof(*mvi) + MVS_SLOTS * sizeof(struct mvs_slot_info),
+                       GFP_KERNEL);
+       if (!mvi)
+               return NULL;
+
+       mvi->pdev = pdev;
+       mvi->dev = &pdev->dev;
+       mvi->chip_id = ent->driver_data;
+       mvi->chip = &mvs_chips[mvi->chip_id];
+       INIT_LIST_HEAD(&mvi->wq_list);
+       mvi->irq = pdev->irq;
+
+       ((struct mvs_prv_info *)sha->lldd_ha)->mvi[id] = mvi;
+       ((struct mvs_prv_info *)sha->lldd_ha)->n_phy = mvi->chip->n_phy;
+
+       mvi->id = id;
+       mvi->sas = sha;
+       mvi->shost = shost;
+#ifdef MVS_USE_TASKLET
+       tasklet_init(&mv_tasklet, mvs_tasklet, (unsigned long)sha);
+#endif
+
+       if (MVS_CHIP_DISP->chip_ioremap(mvi))
+               goto err_out;
+       if (!mvs_alloc(mvi, shost))
+               return mvi;
+err_out:
+       mvs_free(mvi);
+       return NULL;
+}
+
+/* move to PCI layer or libata core? */
+static int pci_go_64(struct pci_dev *pdev)
+{
+       int rc;
+
+       if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+               if (rc) {
+                       rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+                       if (rc) {
+                               dev_printk(KERN_ERR, &pdev->dev,
+                                          "64-bit DMA enable failed\n");
+                               return rc;
+                       }
+               }
+       } else {
+               rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (rc) {
+                       dev_printk(KERN_ERR, &pdev->dev,
+                                  "32-bit DMA enable failed\n");
+                       return rc;
+               }
+               rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+               if (rc) {
+                       dev_printk(KERN_ERR, &pdev->dev,
+                                  "32-bit consistent DMA enable failed\n");
+                       return rc;
+               }
+       }
+
+       return rc;
+}
+
+static int __devinit mvs_prep_sas_ha_init(struct Scsi_Host *shost,
+                               const struct mvs_chip_info *chip_info)
+{
+       int phy_nr, port_nr;
+       unsigned short core_nr;
+       struct asd_sas_phy **arr_phy;
+       struct asd_sas_port **arr_port;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+       core_nr = chip_info->n_host;
+       phy_nr  = core_nr * chip_info->n_phy;
+       port_nr = phy_nr;
+
+       memset(sha, 0x00, sizeof(struct sas_ha_struct));
+       arr_phy  = kcalloc(phy_nr, sizeof(void *), GFP_KERNEL);
+       arr_port = kcalloc(port_nr, sizeof(void *), GFP_KERNEL);
+       if (!arr_phy || !arr_port)
+               goto exit_free;
+
+       sha->sas_phy = arr_phy;
+       sha->sas_port = arr_port;
+
+       sha->lldd_ha = kzalloc(sizeof(struct mvs_prv_info), GFP_KERNEL);
+       if (!sha->lldd_ha)
+               goto exit_free;
+
+       ((struct mvs_prv_info *)sha->lldd_ha)->n_host = core_nr;
+
+       shost->transportt = mvs_stt;
+       shost->max_id = 128;
+       shost->max_lun = ~0;
+       shost->max_channel = 1;
+       shost->max_cmd_len = 16;
+
+       return 0;
+exit_free:
+       kfree(arr_phy);
+       kfree(arr_port);
+       return -1;
+}
+
+static void __devinit mvs_post_sas_ha_init(struct Scsi_Host *shost,
+                       const struct mvs_chip_info *chip_info)
+{
+       int can_queue, i = 0, j = 0;
+       struct mvs_info *mvi = NULL;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+       unsigned short nr_core = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+       for (j = 0; j < nr_core; j++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+               for (i = 0; i < chip_info->n_phy; i++) {
+                       sha->sas_phy[j * chip_info->n_phy  + i] =
+                               &mvi->phy[i].sas_phy;
+                       sha->sas_port[j * chip_info->n_phy + i] =
+                               &mvi->port[i].sas_port;
+               }
+       }
+
+       sha->sas_ha_name = DRV_NAME;
+       sha->dev = mvi->dev;
+       sha->lldd_module = THIS_MODULE;
+       sha->sas_addr = &mvi->sas_addr[0];
+
+       sha->num_phys = nr_core * chip_info->n_phy;
+
+       sha->lldd_max_execute_num = 1;
+
+       if (mvi->flags & MVF_FLAG_SOC)
+               can_queue = MVS_SOC_CAN_QUEUE;
+       else
+               can_queue = MVS_CAN_QUEUE;
+
+       sha->lldd_queue_size = can_queue;
+       shost->can_queue = can_queue;
+       mvi->shost->cmd_per_lun = MVS_SLOTS/sha->num_phys;
+       sha->core.shost = mvi->shost;
+}
+
+static void mvs_init_sas_add(struct mvs_info *mvi)
+{
+       u8 i;
+       for (i = 0; i < mvi->chip->n_phy; i++) {
+               mvi->phy[i].dev_sas_addr = 0x5005043011ab0000ULL;
+               mvi->phy[i].dev_sas_addr =
+                       cpu_to_be64((u64)(*(u64 *)&mvi->phy[i].dev_sas_addr));
+       }
+
+       memcpy(mvi->sas_addr, &mvi->phy[0].dev_sas_addr, SAS_ADDR_SIZE);
+}
+
+static int __devinit mvs_pci_init(struct pci_dev *pdev,
+                                 const struct pci_device_id *ent)
+{
+       int rc;
+       unsigned int nhost = 0;
+       struct mvs_info *mvi;
+       irq_handler_t irq_handler = mvs_interrupt;
+       struct Scsi_Host *shost = NULL;
+       const struct mvs_chip_info *chip;
+
+       dev_printk(KERN_INFO, &pdev->dev,
+               "mvsas: driver version %s\n", DRV_VERSION);
+       rc = pci_enable_device(pdev);
+       if (rc)
+               goto err_out_enable;
+
+       pci_set_master(pdev);
+
+       rc = pci_request_regions(pdev, DRV_NAME);
+       if (rc)
+               goto err_out_disable;
+
+       rc = pci_go_64(pdev);
+       if (rc)
+               goto err_out_regions;
+
+       shost = scsi_host_alloc(&mvs_sht, sizeof(void *));
+       if (!shost) {
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       chip = &mvs_chips[ent->driver_data];
+       SHOST_TO_SAS_HA(shost) =
+               kzalloc(sizeof(struct sas_ha_struct), GFP_KERNEL);
+       if (!SHOST_TO_SAS_HA(shost)) {
+               scsi_host_put(shost);
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       rc = mvs_prep_sas_ha_init(shost, chip);
+       if (rc) {
+               scsi_host_put(shost);
+               rc = -ENOMEM;
+               goto err_out_regions;
+       }
+
+       pci_set_drvdata(pdev, SHOST_TO_SAS_HA(shost));
+
+       do {
+               mvi = mvs_pci_alloc(pdev, ent, shost, nhost);
+               if (!mvi) {
+                       rc = -ENOMEM;
+                       goto err_out_regions;
+               }
+
+               mvs_init_sas_add(mvi);
+
+               mvi->instance = nhost;
+               rc = MVS_CHIP_DISP->chip_init(mvi);
+               if (rc) {
+                       mvs_free(mvi);
+                       goto err_out_regions;
+               }
+               nhost++;
+       } while (nhost < chip->n_host);
+
+       mvs_post_sas_ha_init(shost, chip);
+
+       rc = scsi_add_host(shost, &pdev->dev);
+       if (rc)
+               goto err_out_shost;
+
+       rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
+       if (rc)
+               goto err_out_shost;
+       rc = request_irq(pdev->irq, irq_handler, IRQF_SHARED,
+               DRV_NAME, SHOST_TO_SAS_HA(shost));
+       if (rc)
+               goto err_not_sas;
+
+       MVS_CHIP_DISP->interrupt_enable(mvi);
+
+       scsi_scan_host(mvi->shost);
+
+       return 0;
+
+err_not_sas:
+       sas_unregister_ha(SHOST_TO_SAS_HA(shost));
+err_out_shost:
+       scsi_remove_host(mvi->shost);
+err_out_regions:
+       pci_release_regions(pdev);
+err_out_disable:
+       pci_disable_device(pdev);
+err_out_enable:
+       return rc;
+}
+
+static void __devexit mvs_pci_remove(struct pci_dev *pdev)
+{
+       unsigned short core_nr, i = 0;
+       struct sas_ha_struct *sha = pci_get_drvdata(pdev);
+       struct mvs_info *mvi = NULL;
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[0];
+
+#ifdef MVS_USE_TASKLET
+       tasklet_kill(&mv_tasklet);
+#endif
+
+       pci_set_drvdata(pdev, NULL);
+       sas_unregister_ha(sha);
+       sas_remove_host(mvi->shost);
+       scsi_remove_host(mvi->shost);
+
+       MVS_CHIP_DISP->interrupt_disable(mvi);
+       free_irq(mvi->irq, sha);
+       for (i = 0; i < core_nr; i++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[i];
+               mvs_free(mvi);
+       }
+       kfree(sha->sas_phy);
+       kfree(sha->sas_port);
+       kfree(sha);
+       pci_release_regions(pdev);
+       pci_disable_device(pdev);
+       return;
+}
+
+static struct pci_device_id __devinitdata mvs_pci_table[] = {
+       { PCI_VDEVICE(MARVELL, 0x6320), chip_6320 },
+       { PCI_VDEVICE(MARVELL, 0x6340), chip_6440 },
+       {
+               .vendor         = PCI_VENDOR_ID_MARVELL,
+               .device         = 0x6440,
+               .subvendor      = PCI_ANY_ID,
+               .subdevice      = 0x6480,
+               .class          = 0,
+               .class_mask     = 0,
+               .driver_data    = chip_6485,
+       },
+       { PCI_VDEVICE(MARVELL, 0x6440), chip_6440 },
+       { PCI_VDEVICE(MARVELL, 0x6485), chip_6485 },
+       { PCI_VDEVICE(MARVELL, 0x9480), chip_9480 },
+       { PCI_VDEVICE(MARVELL, 0x9180), chip_9180 },
+
+       { }     /* terminate list */
+};
+
+static struct pci_driver mvs_pci_driver = {
+       .name           = DRV_NAME,
+       .id_table       = mvs_pci_table,
+       .probe          = mvs_pci_init,
+       .remove         = __devexit_p(mvs_pci_remove),
+};
+
+/* task handler */
+struct task_struct *mvs_th;
+static int __init mvs_init(void)
+{
+       int rc;
+       mvs_stt = sas_domain_attach_transport(&mvs_transport_ops);
+       if (!mvs_stt)
+               return -ENOMEM;
+
+       rc = pci_register_driver(&mvs_pci_driver);
+
+       if (rc)
+               goto err_out;
+
+       return 0;
+
+err_out:
+       sas_release_transport(mvs_stt);
+       return rc;
+}
+
+static void __exit mvs_exit(void)
+{
+       pci_unregister_driver(&mvs_pci_driver);
+       sas_release_transport(mvs_stt);
+}
+
+module_init(mvs_init);
+module_exit(mvs_exit);
+
+MODULE_AUTHOR("Jeff Garzik <jgarzik@pobox.com>");
+MODULE_DESCRIPTION("Marvell 88SE6440 SAS/SATA controller driver");
+MODULE_VERSION(DRV_VERSION);
+MODULE_LICENSE("GPL");
+#ifdef CONFIG_PCI
+MODULE_DEVICE_TABLE(pci, mvs_pci_table);
+#endif
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
new file mode 100644 (file)
index 0000000..0d21386
--- /dev/null
@@ -0,0 +1,2154 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#include "mv_sas.h"
+
+static int mvs_find_tag(struct mvs_info *mvi, struct sas_task *task, u32 *tag)
+{
+       if (task->lldd_task) {
+               struct mvs_slot_info *slot;
+               slot = task->lldd_task;
+               *tag = slot->slot_tag;
+               return 1;
+       }
+       return 0;
+}
+
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag)
+{
+       void *bitmap = &mvi->tags;
+       clear_bit(tag, bitmap);
+}
+
+void mvs_tag_free(struct mvs_info *mvi, u32 tag)
+{
+       mvs_tag_clear(mvi, tag);
+}
+
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag)
+{
+       void *bitmap = &mvi->tags;
+       set_bit(tag, bitmap);
+}
+
+inline int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out)
+{
+       unsigned int index, tag;
+       void *bitmap = &mvi->tags;
+
+       index = find_first_zero_bit(bitmap, mvi->tags_num);
+       tag = index;
+       if (tag >= mvi->tags_num)
+               return -SAS_QUEUE_FULL;
+       mvs_tag_set(mvi, tag);
+       *tag_out = tag;
+       return 0;
+}
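+
+/*
+ * Illustrative tag lifecycle (sketch only; locking and error
+ * handling elided):
+ *
+ *     u32 tag;
+ *     if (mvs_tag_alloc(mvi, &tag))
+ *             return -SAS_QUEUE_FULL;         (bitmap exhausted)
+ *     ... program mvi->slot_info[tag], queue the command ...
+ *     mvs_tag_free(mvi, tag);                 (on completion/error)
+ */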
+
+void mvs_tag_init(struct mvs_info *mvi)
+{
+       int i;
+       for (i = 0; i < mvi->tags_num; ++i)
+               mvs_tag_clear(mvi, i);
+}
+
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr)
+{
+       u32 i;
+       u32 run;
+       u32 offset;
+
+       offset = 0;
+       while (size) {
+               printk(KERN_DEBUG"%08X : ", baseaddr + offset);
+               if (size >= 16)
+                       run = 16;
+               else
+                       run = size;
+               size -= run;
+               for (i = 0; i < 16; i++) {
+                       if (i < run)
+                               printk(KERN_DEBUG"%02X ", (u32)data[i]);
+                       else
+                               printk(KERN_DEBUG"   ");
+               }
+               printk(KERN_DEBUG": ");
+               for (i = 0; i < run; i++)
+                       printk(KERN_DEBUG"%c",
+                               isalnum(data[i]) ? data[i] : '.');
+               printk(KERN_DEBUG"\n");
+               data = &data[16];
+               offset += run;
+       }
+       printk(KERN_DEBUG"\n");
+}
+
+#if (_MV_DUMP > 1)
+static void mvs_hba_sb_dump(struct mvs_info *mvi, u32 tag,
+                                  enum sas_protocol proto)
+{
+       u32 offset;
+       struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+       offset = slot->cmd_size + MVS_OAF_SZ +
+           MVS_CHIP_DISP->prd_size() * slot->n_elem;
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->Status buffer[%d] :\n",
+                       tag);
+       mvs_hexdump(32, (u8 *) slot->response,
+                   (u32) slot->buf_dma + offset);
+}
+#endif
+
+static void mvs_hba_memory_dump(struct mvs_info *mvi, u32 tag,
+                               enum sas_protocol proto)
+{
+#if (_MV_DUMP > 1)
+       u32 sz, w_ptr;
+       u64 addr;
+       struct mvs_slot_info *slot = &mvi->slot_info[tag];
+
+       /*Delivery Queue */
+       sz = MVS_CHIP_SLOT_SZ;
+       w_ptr = slot->tx;
+       addr = mvi->tx_dma;
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Delivery Queue Size=%04d , WRT_PTR=%04X\n", sz, w_ptr);
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Delivery Queue Base Address=0x%llX (PA)"
+               "(tx_dma=0x%llX), Entry=%04d\n",
+               addr, (unsigned long long)mvi->tx_dma, w_ptr);
+       mvs_hexdump(sizeof(u32), (u8 *)(&mvi->tx[mvi->tx_prod]),
+                       (u32) mvi->tx_dma + sizeof(u32) * w_ptr);
+       /*Command List */
+       addr = mvi->slot_dma;
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Command List Base Address=0x%llX (PA)"
+               "(slot_dma=0x%llX), Header=%03d\n",
+               addr, (unsigned long long)slot->buf_dma, tag);
+       dev_printk(KERN_DEBUG, mvi->dev, "Command Header[%03d]:\n", tag);
+       /*mvs_cmd_hdr */
+       mvs_hexdump(sizeof(struct mvs_cmd_hdr), (u8 *)(&mvi->slot[tag]),
+               (u32) mvi->slot_dma + tag * sizeof(struct mvs_cmd_hdr));
+       /*1.command table area */
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->Command Table :\n");
+       mvs_hexdump(slot->cmd_size, (u8 *) slot->buf, (u32) slot->buf_dma);
+       /*2.open address frame area */
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->Open Address Frame :\n");
+       mvs_hexdump(MVS_OAF_SZ, (u8 *) slot->buf + slot->cmd_size,
+                               (u32) slot->buf_dma + slot->cmd_size);
+       /*3.status buffer */
+       mvs_hba_sb_dump(mvi, tag, proto);
+       /*4.PRD table */
+       dev_printk(KERN_DEBUG, mvi->dev, "+---->PRD table :\n");
+       mvs_hexdump(MVS_CHIP_DISP->prd_size() * slot->n_elem,
+               (u8 *) slot->buf + slot->cmd_size + MVS_OAF_SZ,
+               (u32) slot->buf_dma + slot->cmd_size + MVS_OAF_SZ);
+#endif
+}
+
+static void mvs_hba_cq_dump(struct mvs_info *mvi)
+{
+#if (_MV_DUMP > 2)
+       u64 addr;
+       void __iomem *regs = mvi->regs;
+       u32 entry = mvi->rx_cons + 1;
+       u32 rx_desc = le32_to_cpu(mvi->rx[entry]);
+
+       /*Completion Queue */
+       addr = ((u64)mr32(RX_HI) << 32) | mr32(RX_LO);
+       dev_printk(KERN_DEBUG, mvi->dev, "Completion Task = 0x%p\n",
+                  mvi->slot_info[rx_desc & RXQ_SLOT_MASK].task);
+       dev_printk(KERN_DEBUG, mvi->dev,
+               "Completion List Base Address=0x%llX (PA), "
+               "CQ_Entry=%04d, CQ_WP=0x%08X\n",
+               addr, entry - 1, mvi->rx[0]);
+       mvs_hexdump(sizeof(u32), (u8 *)(&rx_desc),
+                   mvi->rx_dma + sizeof(u32) * entry);
+#endif
+}
+
+void mvs_get_sas_addr(void *buf, u32 buflen)
+{
+       /*memcpy(buf, "\x50\x05\x04\x30\x11\xab\x64\x40", 8);*/
+}
+
+struct mvs_info *mvs_find_dev_mvi(struct domain_device *dev)
+{
+       unsigned long i = 0, j = 0, hi = 0;
+       struct sas_ha_struct *sha = dev->port->ha;
+       struct mvs_info *mvi = NULL;
+       struct asd_sas_phy *phy;
+
+       while (sha->sas_port[i]) {
+               if (sha->sas_port[i] == dev->port) {
+                       phy =  container_of(sha->sas_port[i]->phy_list.next,
+                               struct asd_sas_phy, port_phy_el);
+                       j = 0;
+                       while (sha->sas_phy[j]) {
+                               if (sha->sas_phy[j] == phy)
+                                       break;
+                               j++;
+                       }
+                       break;
+               }
+               i++;
+       }
+       hi = j/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+       return mvi;
+}
+
+/* FIXME */
+int mvs_find_dev_phyno(struct domain_device *dev, int *phyno)
+{
+       unsigned long i = 0, j = 0, n = 0, num = 0;
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+       struct mvs_info *mvi = mvi_dev->mvi_info;
+       struct sas_ha_struct *sha = dev->port->ha;
+
+       while (sha->sas_port[i]) {
+               if (sha->sas_port[i] == dev->port) {
+                       struct asd_sas_phy *phy;
+                       list_for_each_entry(phy,
+                               &sha->sas_port[i]->phy_list, port_phy_el) {
+                               j = 0;
+                               while (sha->sas_phy[j]) {
+                                       if (sha->sas_phy[j] == phy)
+                                               break;
+                                       j++;
+                               }
+                               phyno[n] = (j >= mvi->chip->n_phy) ?
+                                       (j - mvi->chip->n_phy) : j;
+                               num++;
+                               n++;
+                       }
+                       break;
+               }
+               i++;
+       }
+       return num;
+}
+
+static inline void mvs_free_reg_set(struct mvs_info *mvi,
+                               struct mvs_device *dev)
+{
+       if (!dev) {
+               mv_printk("device has been freed.\n");
+               return;
+       }
+       if (dev->runing_req != 0)
+               return;
+       if (dev->taskfileset == MVS_ID_NOT_MAPPED)
+               return;
+       MVS_CHIP_DISP->free_reg_set(mvi, &dev->taskfileset);
+}
+
+static inline u8 mvs_assign_reg_set(struct mvs_info *mvi,
+                               struct mvs_device *dev)
+{
+       if (dev->taskfileset != MVS_ID_NOT_MAPPED)
+               return 0;
+       return MVS_CHIP_DISP->assign_reg_set(mvi, &dev->taskfileset);
+}
+
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard)
+{
+       u32 no;
+       for_each_phy(phy_mask, phy_mask, no) {
+               if (!(phy_mask & 1))
+                       continue;
+               MVS_CHIP_DISP->phy_reset(mvi, no, hard);
+       }
+}
+
+/* FIXME: locking? */
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+                       void *funcdata)
+{
+       int rc = 0, phy_id = sas_phy->id;
+       u32 tmp, i = 0, hi;
+       struct sas_ha_struct *sha = sas_phy->ha;
+       struct mvs_info *mvi = NULL;
+
+       while (sha->sas_phy[i]) {
+               if (sha->sas_phy[i] == sas_phy)
+                       break;
+               i++;
+       }
+       hi = i/((struct mvs_prv_info *)sha->lldd_ha)->n_phy;
+       mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[hi];
+
+       switch (func) {
+       case PHY_FUNC_SET_LINK_RATE:
+               MVS_CHIP_DISP->phy_set_link_rate(mvi, phy_id, funcdata);
+               break;
+
+       case PHY_FUNC_HARD_RESET:
+               tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_id);
+               if (tmp & PHY_RST_HARD)
+                       break;
+               MVS_CHIP_DISP->phy_reset(mvi, phy_id, 1);
+               break;
+
+       case PHY_FUNC_LINK_RESET:
+               MVS_CHIP_DISP->phy_enable(mvi, phy_id);
+               MVS_CHIP_DISP->phy_reset(mvi, phy_id, 0);
+               break;
+
+       case PHY_FUNC_DISABLE:
+               MVS_CHIP_DISP->phy_disable(mvi, phy_id);
+               break;
+       case PHY_FUNC_RELEASE_SPINUP_HOLD:
+       default:
+               rc = -EOPNOTSUPP;
+       }
+       msleep(200);
+       return rc;
+}
+
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+                               u32 off_lo, u32 off_hi, u64 sas_addr)
+{
+       u32 lo = (u32)sas_addr;
+       u32 hi = (u32)(sas_addr>>32);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_lo);
+       MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, lo);
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, port_id, off_hi);
+       MVS_CHIP_DISP->write_port_cfg_data(mvi, port_id, hi);
+}
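+
+/*
+ * Example with the driver's default address 0x5005043011ab0000:
+ * lo = 0x11ab0000 and hi = 0x50050430 are written through the
+ * per-port config window at off_lo/off_hi.
+ */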
+
+static void mvs_bytes_dmaed(struct mvs_info *mvi, int i)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+       struct sas_ha_struct *sas_ha;
+       if (!phy->phy_attached)
+               return;
+
+       if (!(phy->att_dev_info & PORT_DEV_TRGT_MASK)
+               && phy->phy_type & PORT_TYPE_SAS) {
+               return;
+       }
+
+       sas_ha = mvi->sas;
+       sas_ha->notify_phy_event(sas_phy, PHYE_OOB_DONE);
+
+       if (sas_phy->phy) {
+               struct sas_phy *sphy = sas_phy->phy;
+
+               sphy->negotiated_linkrate = sas_phy->linkrate;
+               sphy->minimum_linkrate = phy->minimum_linkrate;
+               sphy->minimum_linkrate_hw = SAS_LINK_RATE_1_5_GBPS;
+               sphy->maximum_linkrate = phy->maximum_linkrate;
+               sphy->maximum_linkrate_hw = MVS_CHIP_DISP->phy_max_link_rate();
+       }
+
+       if (phy->phy_type & PORT_TYPE_SAS) {
+               struct sas_identify_frame *id;
+
+               id = (struct sas_identify_frame *)phy->frame_rcvd;
+               id->dev_type = phy->identify.device_type;
+               id->initiator_bits = SAS_PROTOCOL_ALL;
+               id->target_bits = phy->identify.target_port_protocols;
+       } else if (phy->phy_type & PORT_TYPE_SATA) {
+               /*Nothing*/
+       }
+       mv_dprintk("phy %d bytes dmaed.\n", i + mvi->id * mvi->chip->n_phy);
+
+       sas_phy->frame_rcvd_size = phy->frame_rcvd_size;
+
+       mvi->sas->notify_port_event(sas_phy,
+                                  PORTE_BYTES_DMAED);
+}
+
+int mvs_slave_alloc(struct scsi_device *scsi_dev)
+{
+       struct domain_device *dev = sdev_to_domain_dev(scsi_dev);
+       if (dev_is_sata(dev)) {
+               /* We don't need to rescan targets
+                * if the REPORT_LUNS request fails
+                */
+               if (scsi_dev->lun > 0)
+                       return -ENXIO;
+               scsi_dev->tagged_supported = 1;
+       }
+
+       return sas_slave_alloc(scsi_dev);
+}
+
+int mvs_slave_configure(struct scsi_device *sdev)
+{
+       struct domain_device *dev = sdev_to_domain_dev(sdev);
+       int ret = sas_slave_configure(sdev);
+
+       if (ret)
+               return ret;
+       if (dev_is_sata(dev)) {
+               /* may set PIO mode */
+       #if MV_DISABLE_NCQ
+               struct ata_port *ap = dev->sata_dev.ap;
+               struct ata_device *adev = ap->link.device;
+               adev->flags |= ATA_DFLAG_NCQ_OFF;
+               scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG, 1);
+       #endif
+       }
+       return 0;
+}
+
+void mvs_scan_start(struct Scsi_Host *shost)
+{
+       int i, j;
+       unsigned short core_nr;
+       struct mvs_info *mvi;
+       struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
+
+       core_nr = ((struct mvs_prv_info *)sha->lldd_ha)->n_host;
+
+       for (j = 0; j < core_nr; j++) {
+               mvi = ((struct mvs_prv_info *)sha->lldd_ha)->mvi[j];
+               for (i = 0; i < mvi->chip->n_phy; ++i)
+                       mvs_bytes_dmaed(mvi, i);
+       }
+}
+
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time)
+{
+       /* give the phy enabling interrupt event time to come in (1s
+        * is empirically about all it takes) */
+       if (time < HZ)
+               return 0;
+       /* Wait for discovery to finish */
+       scsi_flush_work(shost);
+       return 1;
+}
+
+static int mvs_task_prep_smp(struct mvs_info *mvi,
+                            struct mvs_task_exec_info *tei)
+{
+       int elem, rc, i;
+       struct sas_task *task = tei->task;
+       struct mvs_cmd_hdr *hdr = tei->hdr;
+       struct domain_device *dev = task->dev;
+       struct asd_sas_port *sas_port = dev->port;
+       struct scatterlist *sg_req, *sg_resp;
+       u32 req_len, resp_len, tag = tei->tag;
+       void *buf_tmp;
+       u8 *buf_oaf;
+       dma_addr_t buf_tmp_dma;
+       void *buf_prd;
+       struct mvs_slot_info *slot = &mvi->slot_info[tag];
+       u32 flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#if _MV_DUMP
+       u8 *buf_cmd;
+       void *from;
+#endif
+       /*
+        * DMA-map SMP request, response buffers
+        */
+       sg_req = &task->smp_task.smp_req;
+       elem = dma_map_sg(mvi->dev, sg_req, 1, PCI_DMA_TODEVICE);
+       if (!elem)
+               return -ENOMEM;
+       req_len = sg_dma_len(sg_req);
+
+       sg_resp = &task->smp_task.smp_resp;
+       elem = dma_map_sg(mvi->dev, sg_resp, 1, PCI_DMA_FROMDEVICE);
+       if (!elem) {
+               rc = -ENOMEM;
+               goto err_out;
+       }
+       resp_len = SB_RFB_MAX;
+
+       /* must be in dwords */
+       if ((req_len & 0x3) || (resp_len & 0x3)) {
+               rc = -EINVAL;
+               goto err_out_2;
+       }
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
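+       /*
+        * Per-slot buffer layout produced below (sketch):
+        *
+        *   buf_dma -> +-----------------------------+
+        *              | region 1: command table     |
+        *              +-----------------------------+
+        *              | region 2: open addr frame   |  MVS_OAF_SZ
+        *              +-----------------------------+
+        *              | region 3: PRD table         |  prd_size() * n_elem
+        *              +-----------------------------+
+        *              | region 4: status buffer     |
+        *              +-----------------------------+
+        */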
+
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ***** */
+       buf_tmp = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+
+#if _MV_DUMP
+       buf_cmd = buf_tmp;
+       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+       buf_tmp += req_len;
+       buf_tmp_dma += req_len;
+       slot->cmd_size = req_len;
+#else
+       hdr->cmd_tbl = cpu_to_le64(sg_dma_address(sg_req));
+#endif
+
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       buf_oaf = buf_tmp;
+       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table *********************************** */
+       buf_prd = buf_tmp;
+       if (tei->n_elem)
+               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               hdr->prd_tbl = 0;
+
+       i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+       buf_tmp += i;
+       buf_tmp_dma += i;
+
+       /* region 4: status buffer (the larger the PRD, the smaller this buf) */
+       slot->response = buf_tmp;
+       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+       if (mvi->flags & MVF_FLAG_SOC)
+               hdr->reserved[0] = 0;
+
+       /*
+        * Fill in TX ring and command slot header
+        */
+       slot->tx = mvi->tx_prod;
+       mvi->tx[mvi->tx_prod] = cpu_to_le32((TXQ_CMD_SMP << TXQ_CMD_SHIFT) |
+                                       TXQ_MODE_I | tag |
+                                       (sas_port->phy_mask << TXQ_PHY_SHIFT));
+
+       hdr->flags |= flags;
+       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | ((req_len - 4) / 4));
+       hdr->tags = cpu_to_le32(tag);
+       hdr->data_len = 0;
+
+       /* generate open address frame hdr (first 12 bytes) */
+       /* initiator, SMP, ftype 1h */
+       buf_oaf[0] = (1 << 7) | (PROTOCOL_SMP << 4) | 0x01;
+       buf_oaf[1] = dev->linkrate & 0xf;
+       *(u16 *)(buf_oaf + 2) = 0xFFFF;         /* SAS SPEC */
+       memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+       /* fill in PRD (scatter/gather) table, if any */
+       MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+
+#if _MV_DUMP
+       /* copy cmd table */
+       from = kmap_atomic(sg_page(sg_req), KM_IRQ0);
+       memcpy(buf_cmd, from + sg_req->offset, req_len);
+       kunmap_atomic(from, KM_IRQ0);
+#endif
+       return 0;
+
+err_out_2:
+       dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_resp, 1,
+                    PCI_DMA_FROMDEVICE);
+err_out:
+       dma_unmap_sg(mvi->dev, &tei->task->smp_task.smp_req, 1,
+                    PCI_DMA_TODEVICE);
+       return rc;
+}
+
+static u32 mvs_get_ncq_tag(struct sas_task *task, u32 *tag)
+{
+       struct ata_queued_cmd *qc = task->uldd_task;
+
+       if (qc) {
+               if (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
+                       qc->tf.command == ATA_CMD_FPDMA_READ) {
+                       *tag = qc->tag;
+                       return 1;
+               }
+       }
+
+       return 0;
+}
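+
+/*
+ * For first-party DMA (NCQ) commands the device expects the queue tag
+ * in bits 7:3 of the FIS sector-count field; mvs_task_prep_ata() below
+ * shifts the tag returned here into place with "hdr_tag << 3".
+ */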
+
+static int mvs_task_prep_ata(struct mvs_info *mvi,
+                            struct mvs_task_exec_info *tei)
+{
+       struct sas_task *task = tei->task;
+       struct domain_device *dev = task->dev;
+       struct mvs_device *mvi_dev = dev->lldd_dev;
+       struct mvs_cmd_hdr *hdr = tei->hdr;
+       struct asd_sas_port *sas_port = dev->port;
+       struct mvs_slot_info *slot;
+       void *buf_prd;
+       u32 tag = tei->tag, hdr_tag;
+       u32 flags, del_q;
+       void *buf_tmp;
+       u8 *buf_cmd, *buf_oaf;
+       dma_addr_t buf_tmp_dma;
+       u32 i, req_len, resp_len;
+       const u32 max_resp_len = SB_RFB_MAX;
+
+       if (mvs_assign_reg_set(mvi, mvi_dev) == MVS_ID_NOT_MAPPED) {
+               mv_dprintk("No free register set for dev %d.\n",
+                       mvi_dev->device_id);
+               return -EBUSY;
+       }
+       slot = &mvi->slot_info[tag];
+       slot->tx = mvi->tx_prod;
+       del_q = TXQ_MODE_I | tag |
+               (TXQ_CMD_STP << TXQ_CMD_SHIFT) |
+               (sas_port->phy_mask << TXQ_PHY_SHIFT) |
+               (mvi_dev->taskfileset << TXQ_SRS_SHIFT);
+       mvi->tx[mvi->tx_prod] = cpu_to_le32(del_q);
+
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       if (task->data_dir == DMA_FROM_DEVICE)
+               flags = (MVS_CHIP_DISP->prd_count() << MCH_PRD_LEN_SHIFT);
+       else
+               flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#else
+       flags = (tei->n_elem << MCH_PRD_LEN_SHIFT);
+#endif
+       if (task->ata_task.use_ncq)
+               flags |= MCH_FPDMA;
+       if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) {
+               if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
+                       flags |= MCH_ATAPI;
+       }
+
+       /* FIXME: fill in port multiplier number */
+
+       hdr->flags = cpu_to_le32(flags);
+
+       /* FIXME: the low-order 5 bits carry the TAG when NCQ is enabled */
+       if (task->ata_task.use_ncq && mvs_get_ncq_tag(task, &hdr_tag))
+               task->ata_task.fis.sector_count |= (u8) (hdr_tag << 3);
+       else
+               hdr_tag = tag;
+
+       hdr->tags = cpu_to_le32(hdr_tag);
+
+       hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+
+       /* region 1: command table area (MVS_ATA_CMD_SZ bytes) ************** */
+       buf_cmd = buf_tmp = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+
+       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_ATA_CMD_SZ;
+       buf_tmp_dma += MVS_ATA_CMD_SZ;
+#if _MV_DUMP
+       slot->cmd_size = MVS_ATA_CMD_SZ;
+#endif
+
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       /* used for STP.  unused for SATA? */
+       buf_oaf = buf_tmp;
+       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table ********************************************* */
+       buf_prd = buf_tmp;
+
+       if (tei->n_elem)
+               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               hdr->prd_tbl = 0;
+       i = MVS_CHIP_DISP->prd_size() * MVS_CHIP_DISP->prd_count();
+
+       buf_tmp += i;
+       buf_tmp_dma += i;
+
+       /* region 4: status buffer (the larger the PRD, the smaller this buf) */
+       /* FIXME: probably unused, for SATA.  kept here just in case
+        * we get a STP/SATA error information record
+        */
+       slot->response = buf_tmp;
+       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+       if (mvi->flags & MVF_FLAG_SOC)
+               hdr->reserved[0] = 0;
+
+       req_len = sizeof(struct host_to_dev_fis);
+       resp_len = MVS_SLOT_BUF_SZ - MVS_ATA_CMD_SZ -
+           sizeof(struct mvs_err_info) - i;
+
+       /* request, response lengths */
+       resp_len = min(resp_len, max_resp_len);
+       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+       if (likely(!task->ata_task.device_control_reg_update))
+               task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
+       /* fill in command FIS and ATAPI CDB */
+       memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
+       if (dev->sata_dev.command_set == ATAPI_COMMAND_SET)
+               memcpy(buf_cmd + STP_ATAPI_CMD,
+                       task->ata_task.atapi_packet, 16);
+
+       /* generate open address frame hdr (first 12 bytes) */
+       /* initiator, STP, ftype 1h */
+       buf_oaf[0] = (1 << 7) | (PROTOCOL_STP << 4) | 0x1;
+       buf_oaf[1] = dev->linkrate & 0xf;
+       *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+       memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+       /* fill in PRD (scatter/gather) table, if any */
+       MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       if (task->data_dir == DMA_FROM_DEVICE)
+               MVS_CHIP_DISP->dma_fix(mvi->bulk_buffer_dma,
+                               TRASH_BUCKET_SIZE, tei->n_elem, buf_prd);
+#endif
+       return 0;
+}
+
+static int mvs_task_prep_ssp(struct mvs_info *mvi,
+                            struct mvs_task_exec_info *tei, int is_tmf,
+                            struct mvs_tmf_task *tmf)
+{
+       struct sas_task *task = tei->task;
+       struct mvs_cmd_hdr *hdr = tei->hdr;
+       struct mvs_port *port = tei->port;
+       struct domain_device *dev = task->dev;
+       struct mvs_device *mvi_dev = dev->lldd_dev;
+       struct asd_sas_port *sas_port = dev->port;
+       struct mvs_slot_info *slot;
+       void *buf_prd;
+       struct ssp_frame_hdr *ssp_hdr;
+       void *buf_tmp;
+       u8 *buf_cmd, *buf_oaf, fburst = 0;
+       dma_addr_t buf_tmp_dma;
+       u32 flags;
+       u32 resp_len, req_len, i, tag = tei->tag;
+       const u32 max_resp_len = SB_RFB_MAX;
+       u32 phy_mask;
+
+       slot = &mvi->slot_info[tag];
+
+       phy_mask = ((port->wide_port_phymap) ? port->wide_port_phymap :
+               sas_port->phy_mask) & TXQ_PHY_MASK;
+
+       slot->tx = mvi->tx_prod;
+       mvi->tx[mvi->tx_prod] = cpu_to_le32(TXQ_MODE_I | tag |
+                               (TXQ_CMD_SSP << TXQ_CMD_SHIFT) |
+                               (phy_mask << TXQ_PHY_SHIFT));
+
+       flags = MCH_RETRY;
+       if (task->ssp_task.enable_first_burst) {
+               flags |= MCH_FBURST;
+               fburst = (1 << 7);
+       }
+       if (is_tmf)
+               flags |= (MCH_SSP_FR_TASK << MCH_SSP_FR_TYPE_SHIFT);
+       else
+               flags |= (MCH_SSP_FR_CMD << MCH_SSP_FR_TYPE_SHIFT);
+       hdr->flags = cpu_to_le32(flags | (tei->n_elem << MCH_PRD_LEN_SHIFT));
+       hdr->tags = cpu_to_le32(tag);
+       hdr->data_len = cpu_to_le32(task->total_xfer_len);
+
+       /*
+        * arrange MVS_SLOT_BUF_SZ-sized DMA buffer according to our needs
+        */
+
+       /* region 1: command table area (MVS_SSP_CMD_SZ bytes) ************** */
+       buf_cmd = buf_tmp = slot->buf;
+       buf_tmp_dma = slot->buf_dma;
+
+       hdr->cmd_tbl = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_SSP_CMD_SZ;
+       buf_tmp_dma += MVS_SSP_CMD_SZ;
+#if _MV_DUMP
+       slot->cmd_size = MVS_SSP_CMD_SZ;
+#endif
+
+       /* region 2: open address frame area (MVS_OAF_SZ bytes) ********* */
+       buf_oaf = buf_tmp;
+       hdr->open_frame = cpu_to_le64(buf_tmp_dma);
+
+       buf_tmp += MVS_OAF_SZ;
+       buf_tmp_dma += MVS_OAF_SZ;
+
+       /* region 3: PRD table ********************************************* */
+       buf_prd = buf_tmp;
+       if (tei->n_elem)
+               hdr->prd_tbl = cpu_to_le64(buf_tmp_dma);
+       else
+               hdr->prd_tbl = 0;
+
+       i = MVS_CHIP_DISP->prd_size() * tei->n_elem;
+       buf_tmp += i;
+       buf_tmp_dma += i;
+
+       /* region 4: status buffer (the larger the PRD, the smaller this buf) */
+       slot->response = buf_tmp;
+       hdr->status_buf = cpu_to_le64(buf_tmp_dma);
+       if (mvi->flags & MVF_FLAG_SOC)
+               hdr->reserved[0] = 0;
+
+       resp_len = MVS_SLOT_BUF_SZ - MVS_SSP_CMD_SZ - MVS_OAF_SZ -
+           sizeof(struct mvs_err_info) - i;
+       resp_len = min(resp_len, max_resp_len);
+
+       req_len = sizeof(struct ssp_frame_hdr) + 28;
+
+       /* request, response lengths */
+       hdr->lens = cpu_to_le32(((resp_len / 4) << 16) | (req_len / 4));
+
+       /* generate open address frame hdr (first 12 bytes) */
+       /* initiator, SSP, ftype 1h */
+       buf_oaf[0] = (1 << 7) | (PROTOCOL_SSP << 4) | 0x1;
+       buf_oaf[1] = dev->linkrate & 0xf;
+       *(u16 *)(buf_oaf + 2) = cpu_to_be16(mvi_dev->device_id + 1);
+       memcpy(buf_oaf + 4, dev->sas_addr, SAS_ADDR_SIZE);
+
+       /* fill in SSP frame header (Command Table.SSP frame header) */
+       ssp_hdr = (struct ssp_frame_hdr *)buf_cmd;
+
+       if (is_tmf)
+               ssp_hdr->frame_type = SSP_TASK;
+       else
+               ssp_hdr->frame_type = SSP_COMMAND;
+
+       memcpy(ssp_hdr->hashed_dest_addr, dev->hashed_sas_addr,
+              HASHED_SAS_ADDR_SIZE);
+       memcpy(ssp_hdr->hashed_src_addr,
+              dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
+       ssp_hdr->tag = cpu_to_be16(tag);
+
+       /* fill in IU for TASK and Command Frame */
+       buf_cmd += sizeof(*ssp_hdr);
+       memcpy(buf_cmd, &task->ssp_task.LUN, 8);
+
+       if (ssp_hdr->frame_type != SSP_TASK) {
+               buf_cmd[9] = fburst | task->ssp_task.task_attr |
+                               (task->ssp_task.task_prio << 3);
+               memcpy(buf_cmd + 12, &task->ssp_task.cdb, 16);
+       } else {
+               buf_cmd[10] = tmf->tmf;
+               switch (tmf->tmf) {
+               case TMF_ABORT_TASK:
+               case TMF_QUERY_TASK:
+                       buf_cmd[12] =
+                               (tmf->tag_of_task_to_be_managed >> 8) & 0xff;
+                       buf_cmd[13] =
+                               tmf->tag_of_task_to_be_managed & 0xff;
+                       break;
+               default:
+                       break;
+               }
+       }
+       /* fill in PRD (scatter/gather) table, if any */
+       MVS_CHIP_DISP->make_prd(task->scatter, tei->n_elem, buf_prd);
+       return 0;
+}
+
+#define        DEV_IS_GONE(mvi_dev)    ((!mvi_dev || (mvi_dev->dev_type == NO_DEVICE)))
+static int mvs_task_exec(struct sas_task *task, const int num, gfp_t gfp_flags,
+                               struct completion *completion, int is_tmf,
+                               struct mvs_tmf_task *tmf)
+{
+       struct domain_device *dev = task->dev;
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+       struct mvs_info *mvi = mvi_dev->mvi_info;
+       struct mvs_task_exec_info tei;
+       struct sas_task *t = task;
+       struct mvs_slot_info *slot;
+       u32 tag = 0xdeadbeef, rc, n_elem = 0;
+       u32 n = num, pass = 0;
+       unsigned long flags = 0;
+
+       if (!dev->port) {
+               struct task_status_struct *tsm = &t->task_status;
+
+               tsm->resp = SAS_TASK_UNDELIVERED;
+               tsm->stat = SAS_PHY_DOWN;
+               t->task_done(t);
+               return 0;
+       }
+
+       spin_lock_irqsave(&mvi->lock, flags);
+       do {
+               dev = t->dev;
+               mvi_dev = dev->lldd_dev;
+               if (DEV_IS_GONE(mvi_dev)) {
+                       if (mvi_dev)
+                               mv_dprintk("device %d not ready.\n",
+                                       mvi_dev->device_id);
+                       else
+                               mv_dprintk("device %016llx not ready.\n",
+                                       SAS_ADDR(dev->sas_addr));
+
+                       rc = SAS_PHY_DOWN;
+                       goto out_done;
+               }
+
+               if (dev->port->id >= mvi->chip->n_phy)
+                       tei.port = &mvi->port[dev->port->id - mvi->chip->n_phy];
+               else
+                       tei.port = &mvi->port[dev->port->id];
+
+               if (!tei.port->port_attached) {
+                       if (sas_protocol_ata(t->task_proto)) {
+                               mv_dprintk("port %d has no"
+                                       " attached device.\n", dev->port->id);
+                               rc = SAS_PHY_DOWN;
+                               goto out_done;
+                       } else {
+                               struct task_status_struct *ts = &t->task_status;
+                               ts->resp = SAS_TASK_UNDELIVERED;
+                               ts->stat = SAS_PHY_DOWN;
+                               t->task_done(t);
+                               if (n > 1)
+                                       t = list_entry(t->list.next,
+                                                       struct sas_task, list);
+                               continue;
+                       }
+               }
+
+               if (!sas_protocol_ata(t->task_proto)) {
+                       if (t->num_scatter) {
+                               n_elem = dma_map_sg(mvi->dev,
+                                                   t->scatter,
+                                                   t->num_scatter,
+                                                   t->data_dir);
+                               if (!n_elem) {
+                                       rc = -ENOMEM;
+                                       goto err_out;
+                               }
+                       }
+               } else {
+                       n_elem = t->num_scatter;
+               }
+
+               rc = mvs_tag_alloc(mvi, &tag);
+               if (rc)
+                       goto err_out;
+
+               slot = &mvi->slot_info[tag];
+
+
+               t->lldd_task = NULL;
+               slot->n_elem = n_elem;
+               slot->slot_tag = tag;
+               memset(slot->buf, 0, MVS_SLOT_BUF_SZ);
+
+               tei.task = t;
+               tei.hdr = &mvi->slot[tag];
+               tei.tag = tag;
+               tei.n_elem = n_elem;
+               switch (t->task_proto) {
+               case SAS_PROTOCOL_SMP:
+                       rc = mvs_task_prep_smp(mvi, &tei);
+                       break;
+               case SAS_PROTOCOL_SSP:
+                       rc = mvs_task_prep_ssp(mvi, &tei, is_tmf, tmf);
+                       break;
+               case SAS_PROTOCOL_SATA:
+               case SAS_PROTOCOL_STP:
+               case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+                       rc = mvs_task_prep_ata(mvi, &tei);
+                       break;
+               default:
+                       dev_printk(KERN_ERR, mvi->dev,
+                               "unknown sas_task proto: 0x%x\n",
+                               t->task_proto);
+                       rc = -EINVAL;
+                       break;
+               }
+
+               if (rc) {
+                       mv_dprintk("rc is %x\n", rc);
+                       goto err_out_tag;
+               }
+               slot->task = t;
+               slot->port = tei.port;
+               t->lldd_task = slot;
+               list_add_tail(&slot->entry, &tei.port->list);
+               /* TODO: select normal or high priority */
+               spin_lock(&t->task_state_lock);
+               t->task_state_flags |= SAS_TASK_AT_INITIATOR;
+               spin_unlock(&t->task_state_lock);
+
+               mvs_hba_memory_dump(mvi, tag, t->task_proto);
+               mvi_dev->runing_req++;
+               ++pass;
+               mvi->tx_prod = (mvi->tx_prod + 1) & (MVS_CHIP_SLOT_SZ - 1);
+               if (n > 1)
+                       t = list_entry(t->list.next, struct sas_task, list);
+       } while (--n);
+       rc = 0;
+       goto out_done;
+
+err_out_tag:
+       mvs_tag_free(mvi, tag);
+err_out:
+
+       dev_printk(KERN_ERR, mvi->dev, "mvsas exec failed[%d]!\n", rc);
+       if (!sas_protocol_ata(t->task_proto))
+               if (n_elem)
+                       dma_unmap_sg(mvi->dev, t->scatter, n_elem,
+                                    t->data_dir);
+out_done:
+       if (likely(pass)) {
+               MVS_CHIP_DISP->start_delivery(mvi,
+                       (mvi->tx_prod - 1) & (MVS_CHIP_SLOT_SZ - 1));
+       }
+       spin_unlock_irqrestore(&mvi->lock, flags);
+       return rc;
+}
+
+int mvs_queue_command(struct sas_task *task, const int num,
+                       gfp_t gfp_flags)
+{
+       return mvs_task_exec(task, num, gfp_flags, NULL, 0, NULL);
+}
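+
+/*
+ * Presumably wired up as the libsas .lldd_execute_task handler via
+ * mvs_transport_ops (defined in mv_init.c, outside this hunk): libsas
+ * hands over "num" linked sas_tasks, which mvs_task_exec() walks.
+ */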
+
+static void mvs_slot_free(struct mvs_info *mvi, u32 rx_desc)
+{
+       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+       mvs_tag_clear(mvi, slot_idx);
+}
+
+static void mvs_slot_task_free(struct mvs_info *mvi, struct sas_task *task,
+                         struct mvs_slot_info *slot, u32 slot_idx)
+{
+       if (!slot->task)
+               return;
+       if (!sas_protocol_ata(task->task_proto))
+               if (slot->n_elem)
+                       dma_unmap_sg(mvi->dev, task->scatter,
+                                    slot->n_elem, task->data_dir);
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SMP:
+               dma_unmap_sg(mvi->dev, &task->smp_task.smp_resp, 1,
+                            PCI_DMA_FROMDEVICE);
+               dma_unmap_sg(mvi->dev, &task->smp_task.smp_req, 1,
+                            PCI_DMA_TODEVICE);
+               break;
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SSP:
+       default:
+               /* do nothing */
+               break;
+       }
+       list_del_init(&slot->entry);
+       task->lldd_task = NULL;
+       slot->task = NULL;
+       slot->port = NULL;
+       slot->slot_tag = 0xFFFFFFFF;
+       mvs_slot_free(mvi, slot_idx);
+}
+
+static void mvs_update_wideport(struct mvs_info *mvi, int i)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct mvs_port *port = phy->port;
+       int j, no;
+
+       for_each_phy(port->wide_port_phymap, j, no) {
+               if (j & 1) {
+                       MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+                                               PHYR_WIDE_PORT);
+                       MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+                                               port->wide_port_phymap);
+               } else {
+                       MVS_CHIP_DISP->write_port_cfg_addr(mvi, no,
+                                               PHYR_WIDE_PORT);
+                       MVS_CHIP_DISP->write_port_cfg_data(mvi, no,
+                                               0);
+               }
+       }
+}
+
+static u32 mvs_is_phy_ready(struct mvs_info *mvi, int i)
+{
+       u32 tmp;
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct mvs_port *port = phy->port;
+
+       tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, i);
+       if ((tmp & PHY_READY_MASK) && !(phy->irq_status & PHYEV_POOF)) {
+               if (!port)
+                       phy->phy_attached = 1;
+               return tmp;
+       }
+
+       if (port) {
+               if (phy->phy_type & PORT_TYPE_SAS) {
+                       port->wide_port_phymap &= ~(1U << i);
+                       if (!port->wide_port_phymap)
+                               port->port_attached = 0;
+                       mvs_update_wideport(mvi, i);
+               } else if (phy->phy_type & PORT_TYPE_SATA)
+                       port->port_attached = 0;
+               phy->port = NULL;
+               phy->phy_attached = 0;
+               phy->phy_type &= ~(PORT_TYPE_SAS | PORT_TYPE_SATA);
+       }
+       return 0;
+}
+
+static void *mvs_get_d2h_reg(struct mvs_info *mvi, int i, void *buf)
+{
+       u32 *s = (u32 *) buf;
+
+       if (!s)
+               return NULL;
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG3);
+       s[3] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG2);
+       s[2] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG1);
+       s[1] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       MVS_CHIP_DISP->write_port_cfg_addr(mvi, i, PHYR_SATA_SIG0);
+       s[0] = MVS_CHIP_DISP->read_port_cfg_data(mvi, i);
+
+       /* Workaround: treat some ATAPI devices as ATA */
+       if (((s[1] & 0x00FFFFFF) == 0x00EB1401) && (*(u8 *)&s[3] == 0x01))
+               s[1] = 0x00EB1401 | (*((u8 *)&s[1] + 3) & 0x10);
+
+       return s;
+}
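+
+/*
+ * The four dwords read back through PHYR_SATA_SIG0..3 reassemble the
+ * device's initial D2H signature FIS; callers pass phy->frame_rcvd so
+ * the result can be interpreted as a struct dev_to_host_fis.
+ */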
+
+static u32 mvs_is_sig_fis_received(u32 irq_status)
+{
+       return irq_status & PHYEV_SIG_FIS;
+}
+
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st)
+{
+       struct mvs_phy *phy = &mvi->phy[i];
+       struct sas_identify_frame *id;
+
+       id = (struct sas_identify_frame *)phy->frame_rcvd;
+
+       if (get_st) {
+               phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, i);
+               phy->phy_status = mvs_is_phy_ready(mvi, i);
+       }
+
+       if (phy->phy_status) {
+               int oob_done = 0;
+               struct asd_sas_phy *sas_phy = &mvi->phy[i].sas_phy;
+
+               oob_done = MVS_CHIP_DISP->oob_done(mvi, i);
+
+               MVS_CHIP_DISP->fix_phy_info(mvi, i, id);
+               if (phy->phy_type & PORT_TYPE_SATA) {
+                       phy->identify.target_port_protocols = SAS_PROTOCOL_STP;
+                       if (mvs_is_sig_fis_received(phy->irq_status)) {
+                               phy->phy_attached = 1;
+                               phy->att_dev_sas_addr =
+                                       i + mvi->id * mvi->chip->n_phy;
+                               if (oob_done)
+                                       sas_phy->oob_mode = SATA_OOB_MODE;
+                               phy->frame_rcvd_size =
+                                   sizeof(struct dev_to_host_fis);
+                               mvs_get_d2h_reg(mvi, i, id);
+                       } else {
+                               u32 tmp;
+                               dev_printk(KERN_DEBUG, mvi->dev,
+                                       "Phy%d : No sig fis\n", i);
+                               tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, i);
+                               MVS_CHIP_DISP->write_port_irq_mask(mvi, i,
+                                               tmp | PHYEV_SIG_FIS);
+                               phy->phy_attached = 0;
+                               phy->phy_type &= ~PORT_TYPE_SATA;
+                               MVS_CHIP_DISP->phy_reset(mvi, i, 0);
+                               goto out_done;
+                       }
+               } else if (phy->phy_type & PORT_TYPE_SAS
+                       || phy->att_dev_info & PORT_SSP_INIT_MASK) {
+                       phy->phy_attached = 1;
+                       phy->identify.device_type =
+                               phy->att_dev_info & PORT_DEV_TYPE_MASK;
+
+                       if (phy->identify.device_type == SAS_END_DEV)
+                               phy->identify.target_port_protocols =
+                                                       SAS_PROTOCOL_SSP;
+                       else if (phy->identify.device_type != NO_DEVICE)
+                               phy->identify.target_port_protocols =
+                                                       SAS_PROTOCOL_SMP;
+                       if (oob_done)
+                               sas_phy->oob_mode = SAS_OOB_MODE;
+                       phy->frame_rcvd_size =
+                           sizeof(struct sas_identify_frame);
+               }
+               memcpy(sas_phy->attached_sas_addr,
+                       &phy->att_dev_sas_addr, SAS_ADDR_SIZE);
+
+               if (MVS_CHIP_DISP->phy_work_around)
+                       MVS_CHIP_DISP->phy_work_around(mvi, i);
+       }
+       mv_dprintk("port %d attached dev info is %x\n",
+               i + mvi->id * mvi->chip->n_phy, phy->att_dev_info);
+       mv_dprintk("port %d attached sas addr is %llx\n",
+               i + mvi->id * mvi->chip->n_phy, phy->att_dev_sas_addr);
+out_done:
+       if (get_st)
+               MVS_CHIP_DISP->write_port_irq_stat(mvi, i, phy->irq_status);
+}
+
+static void mvs_port_notify_formed(struct asd_sas_phy *sas_phy, int lock)
+{
+       struct sas_ha_struct *sas_ha = sas_phy->ha;
+       struct mvs_info *mvi = NULL;
+       int i = 0, hi;
+       struct mvs_phy *phy = sas_phy->lldd_phy;
+       struct asd_sas_port *sas_port = sas_phy->port;
+       struct mvs_port *port;
+       unsigned long flags = 0;
+       if (!sas_port)
+               return;
+
+       while (sas_ha->sas_phy[i]) {
+               if (sas_ha->sas_phy[i] == sas_phy)
+                       break;
+               i++;
+       }
+       hi = i/((struct mvs_prv_info *)sas_ha->lldd_ha)->n_phy;
+       mvi = ((struct mvs_prv_info *)sas_ha->lldd_ha)->mvi[hi];
+       if (sas_port->id >= mvi->chip->n_phy)
+               port = &mvi->port[sas_port->id - mvi->chip->n_phy];
+       else
+               port = &mvi->port[sas_port->id];
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+       port->port_attached = 1;
+       phy->port = port;
+       if (phy->phy_type & PORT_TYPE_SAS) {
+               port->wide_port_phymap = sas_port->phy_mask;
+               mv_printk("set wide port phy map %x\n", sas_port->phy_mask);
+               mvs_update_wideport(mvi, sas_phy->id);
+       }
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+static void mvs_port_notify_deformed(struct asd_sas_phy *sas_phy, int lock)
+{
+       /*Nothing*/
+}
+
+
+void mvs_port_formed(struct asd_sas_phy *sas_phy)
+{
+       mvs_port_notify_formed(sas_phy, 1);
+}
+
+void mvs_port_deformed(struct asd_sas_phy *sas_phy)
+{
+       mvs_port_notify_deformed(sas_phy, 1);
+}
+
+struct mvs_device *mvs_alloc_dev(struct mvs_info *mvi)
+{
+       u32 dev;
+       for (dev = 0; dev < MVS_MAX_DEVICES; dev++) {
+               if (mvi->devices[dev].dev_type == NO_DEVICE) {
+                       mvi->devices[dev].device_id = dev;
+                       return &mvi->devices[dev];
+               }
+       }
+
+       if (dev == MVS_MAX_DEVICES)
+               mv_printk("max of %d devices supported, ignoring.\n",
+                       MVS_MAX_DEVICES);
+
+       return NULL;
+}
+
+void mvs_free_dev(struct mvs_device *mvi_dev)
+{
+       u32 id = mvi_dev->device_id;
+       memset(mvi_dev, 0, sizeof(*mvi_dev));
+       mvi_dev->device_id = id;
+       mvi_dev->dev_type = NO_DEVICE;
+       mvi_dev->dev_status = MVS_DEV_NORMAL;
+       mvi_dev->taskfileset = MVS_ID_NOT_MAPPED;
+}
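+
+/*
+ * Note that mvs_free_dev() keeps device_id intact: the entry stays at
+ * its index in mvi->devices[] and becomes reusable once dev_type reads
+ * NO_DEVICE again, which is what mvs_alloc_dev() scans for.
+ */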
+
+int mvs_dev_found_notify(struct domain_device *dev, int lock)
+{
+       unsigned long flags = 0;
+       int res = 0;
+       struct mvs_info *mvi = NULL;
+       struct domain_device *parent_dev = dev->parent;
+       struct mvs_device *mvi_device;
+
+       mvi = mvs_find_dev_mvi(dev);
+
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+
+       mvi_device = mvs_alloc_dev(mvi);
+       if (!mvi_device) {
+               res = -1;
+               goto found_out;
+       }
+       dev->lldd_dev = mvi_device;
+       mvi_device->dev_type = dev->dev_type;
+       mvi_device->mvi_info = mvi;
+       if (parent_dev && DEV_IS_EXPANDER(parent_dev->dev_type)) {
+               int phy_id;
+               u8 phy_num = parent_dev->ex_dev.num_phys;
+               struct ex_phy *phy;
+               for (phy_id = 0; phy_id < phy_num; phy_id++) {
+                       phy = &parent_dev->ex_dev.ex_phy[phy_id];
+                       if (SAS_ADDR(phy->attached_sas_addr) ==
+                               SAS_ADDR(dev->sas_addr)) {
+                               mvi_device->attached_phy = phy_id;
+                               break;
+                       }
+               }
+
+               if (phy_id == phy_num) {
+                       mv_printk("Error: no attached dev:%016llx"
+                               " at ex:%016llx.\n",
+                               SAS_ADDR(dev->sas_addr),
+                               SAS_ADDR(parent_dev->sas_addr));
+                       res = -1;
+               }
+       }
+
+found_out:
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+       return res;
+}
+
+int mvs_dev_found(struct domain_device *dev)
+{
+       return mvs_dev_found_notify(dev, 1);
+}
+
+void mvs_dev_gone_notify(struct domain_device *dev, int lock)
+{
+       unsigned long flags = 0;
+       struct mvs_device *mvi_dev = dev->lldd_dev;
+       struct mvs_info *mvi;
+
+       /* check before use: mvi_dev must not be dereferenced for
+        * mvi_info until it is known to be non-NULL */
+       if (!mvi_dev) {
+               mv_dprintk("found dev has gone.\n");
+               dev->lldd_dev = NULL;
+               return;
+       }
+       mvi = mvi_dev->mvi_info;
+
+       if (lock)
+               spin_lock_irqsave(&mvi->lock, flags);
+
+       mv_dprintk("found dev[%d:%x] is gone.\n",
+               mvi_dev->device_id, mvi_dev->dev_type);
+       mvs_free_reg_set(mvi, mvi_dev);
+       mvs_free_dev(mvi_dev);
+       dev->lldd_dev = NULL;
+
+       if (lock)
+               spin_unlock_irqrestore(&mvi->lock, flags);
+}
+
+void mvs_dev_gone(struct domain_device *dev)
+{
+       mvs_dev_gone_notify(dev, 1);
+}
+
+static struct sas_task *mvs_alloc_task(void)
+{
+       struct sas_task *task = kzalloc(sizeof(struct sas_task), GFP_KERNEL);
+
+       if (task) {
+               INIT_LIST_HEAD(&task->list);
+               spin_lock_init(&task->task_state_lock);
+               task->task_state_flags = SAS_TASK_STATE_PENDING;
+               init_timer(&task->timer);
+               init_completion(&task->completion);
+       }
+       return task;
+}
+
+static void mvs_free_task(struct sas_task *task)
+{
+       if (task) {
+               BUG_ON(!list_empty(&task->list));
+               kfree(task);
+       }
+}
+
+static void mvs_task_done(struct sas_task *task)
+{
+       if (!del_timer(&task->timer))
+               return;
+       complete(&task->completion);
+}
+
+static void mvs_tmf_timedout(unsigned long data)
+{
+       struct sas_task *task = (struct sas_task *)data;
+
+       task->task_state_flags |= SAS_TASK_STATE_ABORTED;
+       complete(&task->completion);
+}
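+
+/* Illustrative note: mvs_task_done() and mvs_tmf_timedout() race on the
+ * task timer, and exactly one of them completes task->completion.
+ * mvs_task_done() signals only if it wins the del_timer() race; the
+ * timeout path sets SAS_TASK_STATE_ABORTED first, so the waiter in
+ * mvs_exec_internal_tmf_task() can tell the two cases apart.
+ */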
+
+/* XXX */
+#define MVS_TASK_TIMEOUT 20
+static int mvs_exec_internal_tmf_task(struct domain_device *dev,
+                       void *parameter, u32 para_len, struct mvs_tmf_task *tmf)
+{
+       int res, retry;
+       struct sas_task *task = NULL;
+
+       for (retry = 0; retry < 3; retry++) {
+               task = mvs_alloc_task();
+               if (!task)
+                       return -ENOMEM;
+
+               task->dev = dev;
+               task->task_proto = dev->tproto;
+
+               memcpy(&task->ssp_task, parameter, para_len);
+               task->task_done = mvs_task_done;
+
+               task->timer.data = (unsigned long) task;
+               task->timer.function = mvs_tmf_timedout;
+               task->timer.expires = jiffies + MVS_TASK_TIMEOUT*HZ;
+               add_timer(&task->timer);
+
+               res = mvs_task_exec(task, 1, GFP_KERNEL, NULL, 1, tmf);
+
+               if (res) {
+                       del_timer(&task->timer);
+                       mv_printk("executing internel task failed:%d\n", res);
+                       goto ex_err;
+               }
+
+               wait_for_completion(&task->completion);
+               res = -TMF_RESP_FUNC_FAILED;
+               /* Even if the TMF timed out, return directly. */
+               if ((task->task_state_flags & SAS_TASK_STATE_ABORTED)) {
+                       if (!(task->task_state_flags & SAS_TASK_STATE_DONE)) {
+                               mv_printk("TMF task[%x] timeout.\n", tmf->tmf);
+                               goto ex_err;
+                       }
+               }
+
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                   task->task_status.stat == SAM_GOOD) {
+                       res = TMF_RESP_FUNC_COMPLETE;
+                       break;
+               }
+
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_UNDERRUN) {
+                       /* no error, but return the number of bytes of
+                        * underrun */
+                       res = task->task_status.residual;
+                       break;
+               }
+
+               if (task->task_status.resp == SAS_TASK_COMPLETE &&
+                     task->task_status.stat == SAS_DATA_OVERRUN) {
+                       mv_dprintk("blocked task error.\n");
+                       res = -EMSGSIZE;
+                       break;
+               } else {
+                       mv_dprintk(" task to dev %016llx response: 0x%x "
+                                   "status 0x%x\n",
+                                   SAS_ADDR(dev->sas_addr),
+                                   task->task_status.resp,
+                                   task->task_status.stat);
+                       mvs_free_task(task);
+                       task = NULL;
+
+               }
+       }
+ex_err:
+       BUG_ON(retry == 3 && task != NULL);
+       if (task != NULL)
+               mvs_free_task(task);
+       return res;
+}
+
+static int mvs_debug_issue_ssp_tmf(struct domain_device *dev,
+                               u8 *lun, struct mvs_tmf_task *tmf)
+{
+       struct sas_ssp_task ssp_task;
+
+       if (!(dev->tproto & SAS_PROTOCOL_SSP))
+               return TMF_RESP_FUNC_ESUPP;
+
+       /* the LUN field is an 8-byte buffer, not a C string */
+       memcpy(ssp_task.LUN, lun, 8);
+
+       return mvs_exec_internal_tmf_task(dev, &ssp_task,
+                               sizeof(ssp_task), tmf);
+}
+
+/* The standard mandates a link reset for ATA (type 0) and a hard reset
+   for SSP (type 1), and only for RECOVERY. */
+static int mvs_debug_I_T_nexus_reset(struct domain_device *dev)
+{
+       int rc;
+       struct sas_phy *phy = sas_find_local_phy(dev);
+       int reset_type = (dev->dev_type == SATA_DEV ||
+                       (dev->tproto & SAS_PROTOCOL_STP)) ? 0 : 1;
+       rc = sas_phy_reset(phy, reset_type);
+       msleep(2000);
+       return rc;
+}
+
+/* mandatory SAM-3 */
+int mvs_lu_reset(struct domain_device *dev, u8 *lun)
+{
+       unsigned long flags;
+       int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_tmf_task tmf_task;
+       struct mvs_device *mvi_dev = dev->lldd_dev;
+       struct mvs_info *mvi = mvi_dev->mvi_info;
+
+       tmf_task.tmf = TMF_LU_RESET;
+       mvi_dev->dev_status = MVS_DEV_EH;
+       rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+       if (rc == TMF_RESP_FUNC_COMPLETE) {
+               num = mvs_find_dev_phyno(dev, phyno);
+               spin_lock_irqsave(&mvi->lock, flags);
+               for (i = 0; i < num; i++)
+                       mvs_release_task(mvi, phyno[i], dev);
+               spin_unlock_irqrestore(&mvi->lock, flags);
+       }
+       /* If it failed, fall through to I_T nexus reset */
+       mv_printk("%s for device[%x]:rc= %d\n", __func__,
+                       mvi_dev->device_id, rc);
+       return rc;
+}
+
+int mvs_I_T_nexus_reset(struct domain_device *dev)
+{
+       unsigned long flags;
+       int i, phyno[WIDE_PORT_MAX_PHY], num, rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+       struct mvs_info *mvi = mvi_dev->mvi_info;
+
+       if (mvi_dev->dev_status != MVS_DEV_EH)
+               return TMF_RESP_FUNC_COMPLETE;
+       rc = mvs_debug_I_T_nexus_reset(dev);
+       mv_printk("%s for device[%x]:rc= %d\n",
+               __func__, mvi_dev->device_id, rc);
+
+       /* housekeeper */
+       num = mvs_find_dev_phyno(dev, phyno);
+       spin_lock_irqsave(&mvi->lock, flags);
+       for (i = 0; i < num; i++)
+               mvs_release_task(mvi, phyno[i], dev);
+       spin_unlock_irqrestore(&mvi->lock, flags);
+
+       return rc;
+}
+/* optional SAM-3 */
+int mvs_query_task(struct sas_task *task)
+{
+       u32 tag;
+       struct scsi_lun lun;
+       struct mvs_tmf_task tmf_task;
+       int rc = TMF_RESP_FUNC_FAILED;
+
+       if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+               struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+               struct domain_device *dev = task->dev;
+               struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+               struct mvs_info *mvi = mvi_dev->mvi_info;
+
+               int_to_scsilun(cmnd->device->lun, &lun);
+               rc = mvs_find_tag(mvi, task, &tag);
+               if (rc == 0) {
+                       rc = TMF_RESP_FUNC_FAILED;
+                       return rc;
+               }
+
+               tmf_task.tmf = TMF_QUERY_TASK;
+               tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+               rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+               switch (rc) {
+               /* The task is still in the LUN; release it */
+               case TMF_RESP_FUNC_SUCC:
+               /* The task is not in the LUN or failed; reset the phy */
+               case TMF_RESP_FUNC_FAILED:
+               case TMF_RESP_FUNC_COMPLETE:
+                       break;
+               }
+       }
+       mv_printk("%s:rc= %d\n", __func__, rc);
+       return rc;
+}
+
+/*  mandatory SAM-3, still need free task/slot info */
+int mvs_abort_task(struct sas_task *task)
+{
+       struct scsi_lun lun;
+       struct mvs_tmf_task tmf_task;
+       struct domain_device *dev = task->dev;
+       struct mvs_device *mvi_dev = (struct mvs_device *)dev->lldd_dev;
+       struct mvs_info *mvi = mvi_dev->mvi_info;
+       int rc = TMF_RESP_FUNC_FAILED;
+       unsigned long flags;
+       u32 tag;
+
+       if (mvi->exp_req)
+               mvi->exp_req--;
+       spin_lock_irqsave(&task->task_state_lock, flags);
+       if (task->task_state_flags & SAS_TASK_STATE_DONE) {
+               spin_unlock_irqrestore(&task->task_state_lock, flags);
+               rc = TMF_RESP_FUNC_COMPLETE;
+               goto out;
+       }
+       spin_unlock_irqrestore(&task->task_state_lock, flags);
+       if (task->lldd_task && task->task_proto & SAS_PROTOCOL_SSP) {
+               struct scsi_cmnd * cmnd = (struct scsi_cmnd *)task->uldd_task;
+
+               int_to_scsilun(cmnd->device->lun, &lun);
+               rc = mvs_find_tag(mvi, task, &tag);
+               if (rc == 0) {
+                       mv_printk("No such tag in %s\n", __func__);
+                       rc = TMF_RESP_FUNC_FAILED;
+                       return rc;
+               }
+
+               tmf_task.tmf = TMF_ABORT_TASK;
+               tmf_task.tag_of_task_to_be_managed = cpu_to_le16(tag);
+
+               rc = mvs_debug_issue_ssp_tmf(dev, lun.scsi_lun, &tmf_task);
+
+               /* if successful, clear the task and forward the callback. */
+               if (rc == TMF_RESP_FUNC_COMPLETE) {
+                       u32 slot_no;
+                       struct mvs_slot_info *slot;
+
+                       if (task->lldd_task) {
+                               slot = task->lldd_task;
+                               slot_no = (u32) (slot - mvi->slot_info);
+                               mvs_slot_complete(mvi, slot_no, 1);
+                       }
+               }
+       } else if (task->task_proto & SAS_PROTOCOL_SATA ||
+               task->task_proto & SAS_PROTOCOL_STP) {
+               /* TODO: free the register set */
+       } else {
+               /* SMP: nothing to do */
+       }
+out:
+       if (rc != TMF_RESP_FUNC_COMPLETE)
+               mv_printk("%s:rc= %d\n", __func__, rc);
+       return rc;
+}
+
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_tmf_task tmf_task;
+
+       tmf_task.tmf = TMF_ABORT_TASK_SET;
+       rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+       return rc;
+}
+
+int mvs_clear_aca(struct domain_device *dev, u8 *lun)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_tmf_task tmf_task;
+
+       tmf_task.tmf = TMF_CLEAR_ACA;
+       rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+       return rc;
+}
+
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun)
+{
+       int rc = TMF_RESP_FUNC_FAILED;
+       struct mvs_tmf_task tmf_task;
+
+       tmf_task.tmf = TMF_CLEAR_TASK_SET;
+       rc = mvs_debug_issue_ssp_tmf(dev, lun, &tmf_task);
+
+       return rc;
+}
+
+static int mvs_sata_done(struct mvs_info *mvi, struct sas_task *task,
+                       u32 slot_idx, int err)
+{
+       struct mvs_device *mvi_dev = task->dev->lldd_dev;
+       struct task_status_struct *tstat = &task->task_status;
+       struct ata_task_resp *resp = (struct ata_task_resp *)tstat->buf;
+       int stat = SAM_GOOD;
+
+       resp->frame_len = sizeof(struct dev_to_host_fis);
+       memcpy(&resp->ending_fis[0],
+              SATA_RECEIVED_D2H_FIS(mvi_dev->taskfileset),
+              sizeof(struct dev_to_host_fis));
+       tstat->buf_valid_size = sizeof(*resp);
+       if (unlikely(err))
+               stat = SAS_PROTO_RESPONSE;
+       return stat;
+}
+
+static int mvs_slot_err(struct mvs_info *mvi, struct sas_task *task,
+                        u32 slot_idx)
+{
+       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+       int stat;
+       u32 err_dw0 = le32_to_cpu(*(u32 *) (slot->response));
+       u32 tfs = 0;
+       enum mvs_port_type type = PORT_TYPE_SAS;
+
+       if (err_dw0 & CMD_ISS_STPD)
+               MVS_CHIP_DISP->issue_stop(mvi, type, tfs);
+
+       MVS_CHIP_DISP->command_active(mvi, slot_idx);
+
+       stat = SAM_CHECK_COND;
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+               stat = SAS_ABORTED_TASK;
+               break;
+       case SAS_PROTOCOL_SMP:
+               stat = SAM_CHECK_COND;
+               break;
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
+       {
+               if (err_dw0 == 0x80400002)
+                       mv_printk("find reserved error, why?\n");
+
+               task->ata_task.use_ncq = 0;
+               stat = SAS_PROTO_RESPONSE;
+               mvs_sata_done(mvi, task, slot_idx, 1);
+
+       }
+               break;
+       default:
+               break;
+       }
+
+       return stat;
+}
+
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
+{
+       u32 slot_idx = rx_desc & RXQ_SLOT_MASK;
+       struct mvs_slot_info *slot = &mvi->slot_info[slot_idx];
+       struct sas_task *task = slot->task;
+       struct mvs_device *mvi_dev = NULL;
+       struct task_status_struct *tstat;
+
+       bool aborted;
+       void *to;
+       enum exec_status sts;
+
+       if (mvi->exp_req)
+               mvi->exp_req--;
+       if (unlikely(!task || !task->lldd_task))
+               return -1;
+
+       tstat = &task->task_status;
+       mvi_dev = task->dev->lldd_dev;
+
+       mvs_hba_cq_dump(mvi);
+
+       spin_lock(&task->task_state_lock);
+       task->task_state_flags &=
+               ~(SAS_TASK_STATE_PENDING | SAS_TASK_AT_INITIATOR);
+       task->task_state_flags |= SAS_TASK_STATE_DONE;
+       /* sample ABORTED while still holding the lock (race with abort) */
+       aborted = task->task_state_flags & SAS_TASK_STATE_ABORTED;
+       spin_unlock(&task->task_state_lock);
+
+       memset(tstat, 0, sizeof(*tstat));
+       tstat->resp = SAS_TASK_COMPLETE;
+
+       if (unlikely(aborted)) {
+               tstat->stat = SAS_ABORTED_TASK;
+               if (mvi_dev)
+                       mvi_dev->runing_req--;
+               if (sas_protocol_ata(task->task_proto))
+                       mvs_free_reg_set(mvi, mvi_dev);
+
+               mvs_slot_task_free(mvi, task, slot, slot_idx);
+               return -1;
+       }
+
+       if (unlikely(!mvi_dev || !slot->port->port_attached || flags)) {
+               mv_dprintk("port has not device.\n");
+               tstat->stat = SAS_PHY_DOWN;
+               goto out;
+       }
+
+       /*
+       if (unlikely((rx_desc & RXQ_ERR) || (*(u64 *) slot->response))) {
+               mv_dprintk("Find device[%016llx] RXQ_ERR %X, "
+                       "err info:%016llx\n",
+                       SAS_ADDR(task->dev->sas_addr),
+                       rx_desc, (u64)(*(u64 *) slot->response));
+       }
+       */
+
+       /* error info record present */
+       if (unlikely((rx_desc & RXQ_ERR) && (*(u64 *) slot->response))) {
+               tstat->stat = mvs_slot_err(mvi, task, slot_idx);
+               goto out;
+       }
+
+       switch (task->task_proto) {
+       case SAS_PROTOCOL_SSP:
+               /* hw says status == 0, datapres == 0 */
+               if (rx_desc & RXQ_GOOD) {
+                       tstat->stat = SAM_GOOD;
+                       tstat->resp = SAS_TASK_COMPLETE;
+               }
+               /* response frame present */
+               else if (rx_desc & RXQ_RSP) {
+                       struct ssp_response_iu *iu = slot->response +
+                                               sizeof(struct mvs_err_info);
+                       sas_ssp_task_response(mvi->dev, task, iu);
+               } else
+                       tstat->stat = SAM_CHECK_COND;
+               break;
+
+       case SAS_PROTOCOL_SMP: {
+                       struct scatterlist *sg_resp = &task->smp_task.smp_resp;
+                       tstat->stat = SAM_GOOD;
+                       to = kmap_atomic(sg_page(sg_resp), KM_IRQ0);
+                       memcpy(to + sg_resp->offset,
+                               slot->response + sizeof(struct mvs_err_info),
+                               sg_dma_len(sg_resp));
+                       kunmap_atomic(to, KM_IRQ0);
+                       break;
+               }
+
+       case SAS_PROTOCOL_SATA:
+       case SAS_PROTOCOL_STP:
+       case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP: {
+                       tstat->stat = mvs_sata_done(mvi, task, slot_idx, 0);
+                       break;
+               }
+
+       default:
+               tstat->stat = SAM_CHECK_COND;
+               break;
+       }
+
+out:
+       if (mvi_dev) {
+               mvi_dev->runing_req--;
+               if (sas_protocol_ata(task->task_proto))
+                       mvs_free_reg_set(mvi, mvi_dev);
+       }
+       mvs_slot_task_free(mvi, task, slot, slot_idx);
+       sts = tstat->stat;
+
+       spin_unlock(&mvi->lock);
+       if (task->task_done)
+               task->task_done(task);
+       else
+               mv_dprintk("why has not task_done.\n");
+       spin_lock(&mvi->lock);
+
+       return sts;
+}
+
+void mvs_release_task(struct mvs_info *mvi,
+               int phy_no, struct domain_device *dev)
+{
+       int i = 0; u32 slot_idx;
+       struct mvs_phy *phy;
+       struct mvs_port *port;
+       struct mvs_slot_info *slot, *slot2;
+
+       phy = &mvi->phy[phy_no];
+       port = phy->port;
+       if (!port)
+               return;
+
+       list_for_each_entry_safe(slot, slot2, &port->list, entry) {
+               struct sas_task *task;
+               slot_idx = (u32) (slot - mvi->slot_info);
+               task = slot->task;
+
+               if (dev && task->dev != dev)
+                       continue;
+
+               mv_printk("Release slot [%x] tag[%x], task [%p]:\n",
+                       slot_idx, slot->slot_tag, task);
+
+               if (task->task_proto & SAS_PROTOCOL_SSP) {
+                       mv_printk("attached with SSP task CDB[");
+                       for (i = 0; i < 16; i++)
+                               mv_printk(" %02x", task->ssp_task.cdb[i]);
+                       mv_printk(" ]\n");
+               }
+
+               mvs_slot_complete(mvi, slot_idx, 1);
+       }
+}
+
+static void mvs_phy_disconnected(struct mvs_phy *phy)
+{
+       phy->phy_attached = 0;
+       phy->att_dev_info = 0;
+       phy->att_dev_sas_addr = 0;
+}
+
+static void mvs_work_queue(struct work_struct *work)
+{
+       struct delayed_work *dw = container_of(work, struct delayed_work, work);
+       struct mvs_wq *mwq = container_of(dw, struct mvs_wq, work_q);
+       struct mvs_info *mvi = mwq->mvi;
+       unsigned long flags;
+
+       spin_lock_irqsave(&mvi->lock, flags);
+       if (mwq->handler & PHY_PLUG_EVENT) {
+               u32 phy_no = (unsigned long) mwq->data;
+               struct sas_ha_struct *sas_ha = mvi->sas;
+               struct mvs_phy *phy = &mvi->phy[phy_no];
+               struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+               if (phy->phy_event & PHY_PLUG_OUT) {
+                       u32 tmp;
+                       struct sas_identify_frame *id;
+                       id = (struct sas_identify_frame *)phy->frame_rcvd;
+                       tmp = MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no);
+                       phy->phy_event &= ~PHY_PLUG_OUT;
+                       if (!(tmp & PHY_READY_MASK)) {
+                               sas_phy_disconnected(sas_phy);
+                               mvs_phy_disconnected(phy);
+                               sas_ha->notify_phy_event(sas_phy,
+                                       PHYE_LOSS_OF_SIGNAL);
+                               mv_dprintk("phy%d Removed Device\n", phy_no);
+                       } else {
+                               MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+                               mvs_update_phyinfo(mvi, phy_no, 1);
+                               mvs_bytes_dmaed(mvi, phy_no);
+                               mvs_port_notify_formed(sas_phy, 0);
+                               mv_dprintk("phy%d Attached Device\n", phy_no);
+                       }
+               }
+       }
+       list_del(&mwq->entry);
+       spin_unlock_irqrestore(&mvi->lock, flags);
+       kfree(mwq);
+}
+
+static int mvs_handle_event(struct mvs_info *mvi, void *data, int handler)
+{
+       struct mvs_wq *mwq;
+       int ret = 0;
+
+       mwq = kmalloc(sizeof(struct mvs_wq), GFP_ATOMIC);
+       if (mwq) {
+               mwq->mvi = mvi;
+               mwq->data = data;
+               mwq->handler = handler;
+               MV_INIT_DELAYED_WORK(&mwq->work_q, mvs_work_queue, mwq);
+               list_add_tail(&mwq->entry, &mvi->wq_list);
+               schedule_delayed_work(&mwq->work_q, HZ * 2);
+       } else
+               ret = -ENOMEM;
+
+       return ret;
+}
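+
+/* Illustrative note: phy plug events are deferred out of the interrupt
+ * path.  mvs_int_port() below queues the phy number with PHY_PLUG_EVENT
+ * and mvs_work_queue() above re-checks PHY_READY two seconds later
+ * (HZ * 2), debouncing the plug-in/plug-out transition.
+ */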
+
+static void mvs_sig_time_out(unsigned long tphy)
+{
+       struct mvs_phy *phy = (struct mvs_phy *)tphy;
+       struct mvs_info *mvi = phy->mvi;
+       u8 phy_no;
+
+       for (phy_no = 0; phy_no < mvi->chip->n_phy; phy_no++) {
+               if (&mvi->phy[phy_no] == phy) {
+                       mv_dprintk("Get signature time out, reset phy %d\n",
+                               phy_no+mvi->id*mvi->chip->n_phy);
+                       MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);
+               }
+       }
+}
+
+static void mvs_sig_remove_timer(struct mvs_phy *phy)
+{
+       if (phy->timer.function)
+               del_timer(&phy->timer);
+       phy->timer.function = NULL;
+}
+
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events)
+{
+       u32 tmp;
+       struct sas_ha_struct *sas_ha = mvi->sas;
+       struct mvs_phy *phy = &mvi->phy[phy_no];
+       struct asd_sas_phy *sas_phy = &phy->sas_phy;
+
+       phy->irq_status = MVS_CHIP_DISP->read_port_irq_stat(mvi, phy_no);
+       mv_dprintk("port %d ctrl sts=0x%X.\n", phy_no+mvi->id*mvi->chip->n_phy,
+               MVS_CHIP_DISP->read_phy_ctl(mvi, phy_no));
+       mv_dprintk("Port %d irq sts = 0x%X\n", phy_no+mvi->id*mvi->chip->n_phy,
+               phy->irq_status);
+
+       /*
+        * events are port events now;
+        * check the interrupt status that belongs to each port.
+        */
+
+       if (phy->irq_status & PHYEV_DCDR_ERR)
+               mv_dprintk("port %d STP decoding error.\n",
+               phy_no+mvi->id*mvi->chip->n_phy);
+
+       if (phy->irq_status & PHYEV_POOF) {
+               if (!(phy->phy_event & PHY_PLUG_OUT)) {
+                       int dev_sata = phy->phy_type & PORT_TYPE_SATA;
+                       int ready;
+                       mvs_release_task(mvi, phy_no, NULL);
+                       phy->phy_event |= PHY_PLUG_OUT;
+                       mvs_handle_event(mvi,
+                               (void *)(unsigned long)phy_no,
+                               PHY_PLUG_EVENT);
+                       ready = mvs_is_phy_ready(mvi, phy_no);
+                       if (!ready)
+                               mv_dprintk("phy%d Unplug Notice\n",
+                                       phy_no +
+                                       mvi->id * mvi->chip->n_phy);
+                       if (ready || dev_sata) {
+                               if (MVS_CHIP_DISP->stp_reset)
+                                       MVS_CHIP_DISP->stp_reset(mvi,
+                                                       phy_no);
+                               else
+                                       MVS_CHIP_DISP->phy_reset(mvi,
+                                                       phy_no, 0);
+                               return;
+                       }
+               }
+       }
+
+       if (phy->irq_status & PHYEV_COMWAKE) {
+               tmp = MVS_CHIP_DISP->read_port_irq_mask(mvi, phy_no);
+               MVS_CHIP_DISP->write_port_irq_mask(mvi, phy_no,
+                                       tmp | PHYEV_SIG_FIS);
+               if (phy->timer.function == NULL) {
+                       phy->timer.data = (unsigned long)phy;
+                       phy->timer.function = mvs_sig_time_out;
+                       phy->timer.expires = jiffies + 10*HZ;
+                       add_timer(&phy->timer);
+               }
+       }
+       if (phy->irq_status & (PHYEV_SIG_FIS | PHYEV_ID_DONE)) {
+               phy->phy_status = mvs_is_phy_ready(mvi, phy_no);
+               mvs_sig_remove_timer(phy);
+               mv_dprintk("notify plug in on phy[%d]\n", phy_no);
+               if (phy->phy_status) {
+                       mdelay(10);
+                       MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+                       if (phy->phy_type & PORT_TYPE_SATA) {
+                               tmp = MVS_CHIP_DISP->read_port_irq_mask(
+                                               mvi, phy_no);
+                               tmp &= ~PHYEV_SIG_FIS;
+                               MVS_CHIP_DISP->write_port_irq_mask(mvi,
+                                                       phy_no, tmp);
+                       }
+                       mvs_update_phyinfo(mvi, phy_no, 0);
+                       mvs_bytes_dmaed(mvi, phy_no);
+                       /* whether driver is going to handle hot plug */
+                       if (phy->phy_event & PHY_PLUG_OUT) {
+                               mvs_port_notify_formed(sas_phy, 0);
+                               phy->phy_event &= ~PHY_PLUG_OUT;
+                       }
+               } else {
+                       mv_dprintk("plugin interrupt but phy%d is gone\n",
+                               phy_no + mvi->id*mvi->chip->n_phy);
+               }
+       } else if (phy->irq_status & PHYEV_BROAD_CH) {
+               mv_dprintk("port %d broadcast change.\n",
+                       phy_no + mvi->id*mvi->chip->n_phy);
+               /* workaround for Samsung disk drives */
+               mdelay(1000);
+               sas_ha->notify_port_event(sas_phy, PORTE_BROADCAST_RCVD);
+       }
+       MVS_CHIP_DISP->write_port_irq_stat(mvi, phy_no, phy->irq_status);
+}
+
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear)
+{
+       u32 rx_prod_idx, rx_desc;
+       bool attn = false;
+
+       /* the first dword in the RX ring is special: it contains
+        * a mirror of the hardware's RX producer index, so that
+        * we don't have to stall the CPU reading that register.
+        * The actual RX ring is offset by one dword, due to this.
+        */
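+       /* Worked example (illustrative): with MVS_RX_RING_SZ == 32, a
+        * cached consumer of 5 and a mirrored producer of 8 in rx[0],
+        * the loop below consumes ring slots 6..8, which live at
+        * rx[7]..rx[9] because of the one-dword mirror offset.
+        */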
+       rx_prod_idx = mvi->rx_cons;
+       mvi->rx_cons = le32_to_cpu(mvi->rx[0]);
+       if (mvi->rx_cons == 0xfff)      /* h/w hasn't touched RX ring yet */
+               return 0;
+
+       /* The CMPL_Q may arrive late; read from the register and try again.
+        * Note: if coalescing is enabled, the register must be read
+        * every time.
+        */
+       if (unlikely(mvi->rx_cons == rx_prod_idx))
+               mvi->rx_cons = MVS_CHIP_DISP->rx_update(mvi) & RX_RING_SZ_MASK;
+
+       if (mvi->rx_cons == rx_prod_idx)
+               return 0;
+
+       while (mvi->rx_cons != rx_prod_idx) {
+               /* increment our internal RX consumer pointer */
+               rx_prod_idx = (rx_prod_idx + 1) & (MVS_RX_RING_SZ - 1);
+               rx_desc = le32_to_cpu(mvi->rx[rx_prod_idx + 1]);
+
+               if (likely(rx_desc & RXQ_DONE))
+                       mvs_slot_complete(mvi, rx_desc, 0);
+               if (rx_desc & RXQ_ATTN) {
+                       attn = true;
+               } else if (rx_desc & RXQ_ERR) {
+                       if (!(rx_desc & RXQ_DONE))
+                               mvs_slot_complete(mvi, rx_desc, 0);
+               } else if (rx_desc & RXQ_SLOT_RESET) {
+                       mvs_slot_free(mvi, rx_desc);
+               }
+       }
+
+       if (attn && self_clear)
+               MVS_CHIP_DISP->int_full(mvi);
+       return 0;
+}
+
diff --git a/drivers/scsi/mvsas/mv_sas.h b/drivers/scsi/mvsas/mv_sas.h
new file mode 100644 (file)
index 0000000..aa2270a
--- /dev/null
@@ -0,0 +1,406 @@
+/*
+ * Marvell 88SE64xx/88SE94xx main function header file
+ *
+ * Copyright 2007 Red Hat, Inc.
+ * Copyright 2008 Marvell. <kewei@marvell.com>
+ *
+ * This file is licensed under GPLv2.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; version 2 of the
+ * License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
+ * USA
+*/
+
+#ifndef _MV_SAS_H_
+#define _MV_SAS_H_
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/types.h>
+#include <linux/ctype.h>
+#include <linux/dma-mapping.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/vmalloc.h>
+#include <scsi/libsas.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/sas_ata.h>
+#include <linux/version.h>
+#include "mv_defs.h"
+
+#define DRV_NAME               "mvsas"
+#define DRV_VERSION            "0.8.2"
+#define _MV_DUMP               0
+#define MVS_ID_NOT_MAPPED      0x7f
+/* #define DISABLE_HOTPLUG_DMA_FIX */
+#define MAX_EXP_RUNNING_REQ    2
+#define WIDE_PORT_MAX_PHY              4
+#define MV_DISABLE_NCQ         0
+#define mv_printk(fmt, arg ...)        \
+       printk(KERN_DEBUG"%s %d:" fmt, __FILE__, __LINE__, ## arg)
+#ifdef MV_DEBUG
+#define mv_dprintk(format, arg...)     \
+       printk(KERN_DEBUG"%s %d:" format, __FILE__, __LINE__, ## arg)
+#else
+#define mv_dprintk(format, arg...)
+#endif
+#define MV_MAX_U32                     0xffffffff
+
+extern struct mvs_tgt_initiator mvs_tgt;
+extern struct mvs_info *tgt_mvi;
+extern const struct mvs_dispatch mvs_64xx_dispatch;
+extern const struct mvs_dispatch mvs_94xx_dispatch;
+
+#define DEV_IS_EXPANDER(type)  \
+       ((type == EDGE_DEV) || (type == FANOUT_DEV))
+
+#define bit(n) ((u32)1 << n)
+
+#define for_each_phy(__lseq_mask, __mc, __lseq)                        \
+       for ((__mc) = (__lseq_mask), (__lseq) = 0;              \
+                                       (__mc) != 0 ;           \
+                                       (++__lseq), (__mc) >>= 1)
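+
+/* Usage sketch (illustrative; handle_one_phy() is a hypothetical helper):
+ * walk the phys named in a bitmask such as a wide port phy map; __mc is
+ * shifted right on each pass, so test bit 0:
+ *
+ *     u32 mc;
+ *     int n;
+ *     for_each_phy(port->wide_port_phymap, mc, n)
+ *             if (mc & 1)
+ *                     handle_one_phy(mvi, n);
+ */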
+
+#define MV_INIT_DELAYED_WORK(w, f, d)  INIT_DELAYED_WORK(w, f)
+#define UNASSOC_D2H_FIS(id)            \
+       ((void *) mvi->rx_fis + 0x100 * id)
+#define SATA_RECEIVED_FIS_LIST(reg_set)        \
+       ((void *) mvi->rx_fis + mvi->chip->fis_offs + 0x100 * reg_set)
+#define SATA_RECEIVED_SDB_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x58)
+#define SATA_RECEIVED_D2H_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x40)
+#define SATA_RECEIVED_PIO_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x20)
+#define SATA_RECEIVED_DMA_FIS(reg_set) \
+       (SATA_RECEIVED_FIS_LIST(reg_set) + 0x00)
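+
+/* Illustrative layout note: each SATA register set owns a 0x100-byte FIS
+ * area at rx_fis + fis_offs + 0x100 * reg_set; within it the DMA setup
+ * FIS sits at +0x00, PIO setup at +0x20, D2H at +0x40 and SDB at +0x58,
+ * mirroring the standard received-FIS area layout.  E.g. the D2H FIS
+ * for reg_set 2 lives at rx_fis + fis_offs + 0x240.
+ */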
+
+enum dev_status {
+       MVS_DEV_NORMAL = 0x0,
+       MVS_DEV_EH      = 0x1,
+};
+
+struct mvs_info;
+
+struct mvs_dispatch {
+       char *name;
+       int (*chip_init)(struct mvs_info *mvi);
+       int (*spi_init)(struct mvs_info *mvi);
+       int (*chip_ioremap)(struct mvs_info *mvi);
+       void (*chip_iounmap)(struct mvs_info *mvi);
+       irqreturn_t (*isr)(struct mvs_info *mvi, int irq, u32 stat);
+       u32 (*isr_status)(struct mvs_info *mvi, int irq);
+       void (*interrupt_enable)(struct mvs_info *mvi);
+       void (*interrupt_disable)(struct mvs_info *mvi);
+
+       u32 (*read_phy_ctl)(struct mvs_info *mvi, u32 port);
+       void (*write_phy_ctl)(struct mvs_info *mvi, u32 port, u32 val);
+
+       u32 (*read_port_cfg_data)(struct mvs_info *mvi, u32 port);
+       void (*write_port_cfg_data)(struct mvs_info *mvi, u32 port, u32 val);
+       void (*write_port_cfg_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+       u32 (*read_port_vsr_data)(struct mvs_info *mvi, u32 port);
+       void (*write_port_vsr_data)(struct mvs_info *mvi, u32 port, u32 val);
+       void (*write_port_vsr_addr)(struct mvs_info *mvi, u32 port, u32 addr);
+
+       u32 (*read_port_irq_stat)(struct mvs_info *mvi, u32 port);
+       void (*write_port_irq_stat)(struct mvs_info *mvi, u32 port, u32 val);
+
+       u32 (*read_port_irq_mask)(struct mvs_info *mvi, u32 port);
+       void (*write_port_irq_mask)(struct mvs_info *mvi, u32 port, u32 val);
+
+       void (*get_sas_addr)(void *buf, u32 buflen);
+       void (*command_active)(struct mvs_info *mvi, u32 slot_idx);
+       void (*issue_stop)(struct mvs_info *mvi, enum mvs_port_type type,
+                               u32 tfs);
+       void (*start_delivery)(struct mvs_info *mvi, u32 tx);
+       u32 (*rx_update)(struct mvs_info *mvi);
+       void (*int_full)(struct mvs_info *mvi);
+       u8 (*assign_reg_set)(struct mvs_info *mvi, u8 *tfs);
+       void (*free_reg_set)(struct mvs_info *mvi, u8 *tfs);
+       u32 (*prd_size)(void);
+       u32 (*prd_count)(void);
+       void (*make_prd)(struct scatterlist *scatter, int nr, void *prd);
+       void (*detect_porttype)(struct mvs_info *mvi, int i);
+       int (*oob_done)(struct mvs_info *mvi, int i);
+       void (*fix_phy_info)(struct mvs_info *mvi, int i,
+                               struct sas_identify_frame *id);
+       void (*phy_work_around)(struct mvs_info *mvi, int i);
+       void (*phy_set_link_rate)(struct mvs_info *mvi, u32 phy_id,
+                               struct sas_phy_linkrates *rates);
+       u32 (*phy_max_link_rate)(void);
+       void (*phy_disable)(struct mvs_info *mvi, u32 phy_id);
+       void (*phy_enable)(struct mvs_info *mvi, u32 phy_id);
+       void (*phy_reset)(struct mvs_info *mvi, u32 phy_id, int hard);
+       void (*stp_reset)(struct mvs_info *mvi, u32 phy_id);
+       void (*clear_active_cmds)(struct mvs_info *mvi);
+       u32 (*spi_read_data)(struct mvs_info *mvi);
+       void (*spi_write_data)(struct mvs_info *mvi, u32 data);
+       int (*spi_buildcmd)(struct mvs_info *mvi,
+                                               u32      *dwCmd,
+                                               u8       cmd,
+                                               u8       read,
+                                               u8       length,
+                                               u32      addr
+                                               );
+       int (*spi_issuecmd)(struct mvs_info *mvi, u32 cmd);
+       int (*spi_waitdataready)(struct mvs_info *mvi, u32 timeout);
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       void (*dma_fix)(dma_addr_t buf_dma, int buf_len, int from, void *prd);
+#endif
+
+};
+
+struct mvs_chip_info {
+       u32             n_host;
+       u32             n_phy;
+       u32             fis_offs;
+       u32             fis_count;
+       u32             srs_sz;
+       u32             slot_width;
+       const struct mvs_dispatch *dispatch;
+};
+#define MVS_CHIP_SLOT_SZ       (1U << mvi->chip->slot_width)
+#define MVS_RX_FISL_SZ         \
+       (mvi->chip->fis_offs + (mvi->chip->fis_count * 0x100))
+#define MVS_CHIP_DISP          (mvi->chip->dispatch)
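+
+/* Illustrative call pattern: chip-specific operations always go through
+ * the dispatch table, e.g.
+ *
+ *     MVS_CHIP_DISP->phy_reset(mvi, phy_no, 1);       hard reset
+ *     MVS_CHIP_DISP->detect_porttype(mvi, phy_no);
+ */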
+
+struct mvs_err_info {
+       __le32                  flags;
+       __le32                  flags2;
+};
+
+struct mvs_cmd_hdr {
+       __le32                  flags;  /* PRD tbl len; SAS, SATA ctl */
+       __le32                  lens;   /* cmd, max resp frame len */
+       __le32                  tags;   /* targ port xfer tag; tag */
+       __le32                  data_len;       /* data xfer len */
+       __le64                  cmd_tbl;        /* command table address */
+       __le64                  open_frame;     /* open addr frame address */
+       __le64                  status_buf;     /* status buffer address */
+       __le64                  prd_tbl;                /* PRD tbl address */
+       __le32                  reserved[4];
+};
+
+struct mvs_port {
+       struct asd_sas_port     sas_port;
+       u8                      port_attached;
+       u8                      wide_port_phymap;
+       struct list_head        list;
+};
+
+struct mvs_phy {
+       struct mvs_info                 *mvi;
+       struct mvs_port         *port;
+       struct asd_sas_phy      sas_phy;
+       struct sas_identify     identify;
+       struct scsi_device      *sdev;
+       struct timer_list timer;
+       u64             dev_sas_addr;
+       u64             att_dev_sas_addr;
+       u32             att_dev_info;
+       u32             dev_info;
+       u32             phy_type;
+       u32             phy_status;
+       u32             irq_status;
+       u32             frame_rcvd_size;
+       u8              frame_rcvd[32];
+       u8              phy_attached;
+       u8              phy_mode;
+       u8              reserved[2];
+       u32             phy_event;
+       enum sas_linkrate       minimum_linkrate;
+       enum sas_linkrate       maximum_linkrate;
+};
+
+struct mvs_device {
+       struct list_head                dev_entry;
+       enum sas_dev_type dev_type;
+       struct mvs_info *mvi_info;
+       struct domain_device *sas_device;
+       u32 attached_phy;
+       u32 device_id;
+       u32 runing_req;
+       u8 taskfileset;
+       u8 dev_status;
+       u16 reserved;
+};
+
+struct mvs_slot_info {
+       struct list_head entry;
+       union {
+               struct sas_task *task;
+               void *tdata;
+       };
+       u32 n_elem;
+       u32 tx;
+       u32 slot_tag;
+
+       /* DMA buffer for storing cmd tbl, open addr frame, status buffer,
+        * and PRD table
+        */
+       void *buf;
+       dma_addr_t buf_dma;
+#if _MV_DUMP
+       u32 cmd_size;
+#endif
+       void *response;
+       struct mvs_port *port;
+       struct mvs_device       *device;
+       void *open_frame;
+};
+
+struct mvs_info {
+       unsigned long flags;
+
+       /* host-wide lock */
+       spinlock_t lock;
+
+       /* our device */
+       struct pci_dev *pdev;
+       struct device *dev;
+
+       /* enhanced mode registers */
+       void __iomem *regs;
+
+       /* peripheral or soc registers */
+       void __iomem *regs_ex;
+       u8 sas_addr[SAS_ADDR_SIZE];
+
+       /* SCSI/SAS glue */
+       struct sas_ha_struct *sas;
+       struct Scsi_Host *shost;
+
+       /* TX (delivery) DMA ring */
+       __le32 *tx;
+       dma_addr_t tx_dma;
+
+       /* cached next-producer idx */
+       u32 tx_prod;
+
+       /* RX (completion) DMA ring */
+       __le32  *rx;
+       dma_addr_t rx_dma;
+
+       /* RX consumer idx */
+       u32 rx_cons;
+
+       /* RX'd FIS area */
+       __le32 *rx_fis;
+       dma_addr_t rx_fis_dma;
+
+       /* DMA command header slots */
+       struct mvs_cmd_hdr *slot;
+       dma_addr_t slot_dma;
+
+       u32 chip_id;
+       const struct mvs_chip_info *chip;
+
+       int tags_num;
+       DECLARE_BITMAP(tags, MVS_SLOTS);
+       /* further per-slot information */
+       struct mvs_phy phy[MVS_MAX_PHYS];
+       struct mvs_port port[MVS_MAX_PHYS];
+       u32 irq;
+       u32 exp_req;
+       u32 id;
+       u64 sata_reg_set;
+       struct list_head *hba_list;
+       struct list_head soc_entry;
+       struct list_head wq_list;
+       unsigned long instance;
+       u16 flashid;
+       u32 flashsize;
+       u32 flashsectSize;
+
+       void *addon;
+       struct mvs_device       devices[MVS_MAX_DEVICES];
+#ifndef DISABLE_HOTPLUG_DMA_FIX
+       void *bulk_buffer;
+       dma_addr_t bulk_buffer_dma;
+#define TRASH_BUCKET_SIZE      0x20000
+#endif
+       struct mvs_slot_info slot_info[0];
+};
+
+struct mvs_prv_info{
+       u8 n_host;
+       u8 n_phy;
+       u16 reserve;
+       struct mvs_info *mvi[2];
+};
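+
+/* Illustrative lookup (mirrors mvs_port_notify_formed in mv_sas.c): a
+ * host-wide phy index resolves to a controller and a local phy number:
+ *
+ *     struct mvs_prv_info *prv = sas_ha->lldd_ha;
+ *     struct mvs_info *one_mvi = prv->mvi[phy_idx / prv->n_phy];
+ *     int local_phy = phy_idx % prv->n_phy;
+ */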
+
+struct mvs_wq {
+       struct delayed_work work_q;
+       struct mvs_info *mvi;
+       void *data;
+       int handler;
+       struct list_head entry;
+};
+
+struct mvs_task_exec_info {
+       struct sas_task *task;
+       struct mvs_cmd_hdr *hdr;
+       struct mvs_port *port;
+       u32 tag;
+       int n_elem;
+};
+
+/******************** function prototype *********************/
+void mvs_get_sas_addr(void *buf, u32 buflen);
+void mvs_tag_clear(struct mvs_info *mvi, u32 tag);
+void mvs_tag_free(struct mvs_info *mvi, u32 tag);
+void mvs_tag_set(struct mvs_info *mvi, unsigned int tag);
+int mvs_tag_alloc(struct mvs_info *mvi, u32 *tag_out);
+void mvs_tag_init(struct mvs_info *mvi);
+void mvs_iounmap(void __iomem *regs);
+int mvs_ioremap(struct mvs_info *mvi, int bar, int bar_ex);
+void mvs_phys_reset(struct mvs_info *mvi, u32 phy_mask, int hard);
+int mvs_phy_control(struct asd_sas_phy *sas_phy, enum phy_func func,
+                       void *funcdata);
+void __devinit mvs_set_sas_addr(struct mvs_info *mvi, int port_id,
+                               u32 off_lo, u32 off_hi, u64 sas_addr);
+int mvs_slave_alloc(struct scsi_device *scsi_dev);
+int mvs_slave_configure(struct scsi_device *sdev);
+void mvs_scan_start(struct Scsi_Host *shost);
+int mvs_scan_finished(struct Scsi_Host *shost, unsigned long time);
+int mvs_queue_command(struct sas_task *task, const int num,
+                       gfp_t gfp_flags);
+int mvs_abort_task(struct sas_task *task);
+int mvs_abort_task_set(struct domain_device *dev, u8 *lun);
+int mvs_clear_aca(struct domain_device *dev, u8 *lun);
+int mvs_clear_task_set(struct domain_device *dev, u8 *lun);
+void mvs_port_formed(struct asd_sas_phy *sas_phy);
+void mvs_port_deformed(struct asd_sas_phy *sas_phy);
+int mvs_dev_found(struct domain_device *dev);
+void mvs_dev_gone(struct domain_device *dev);
+int mvs_lu_reset(struct domain_device *dev, u8 *lun);
+int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags);
+int mvs_I_T_nexus_reset(struct domain_device *dev);
+int mvs_query_task(struct sas_task *task);
+void mvs_release_task(struct mvs_info *mvi, int phy_no,
+                       struct domain_device *dev);
+void mvs_int_port(struct mvs_info *mvi, int phy_no, u32 events);
+void mvs_update_phyinfo(struct mvs_info *mvi, int i, int get_st);
+int mvs_int_rx(struct mvs_info *mvi, bool self_clear);
+void mvs_hexdump(u32 size, u8 *data, u32 baseaddr);
+#endif
+
index 0e207aa67d16e3e8e4c8c6e4c1b2f0d6c88fe8c8..5fd73d77c3af86b17344c1e544dd4ed22bdd6105 100644 (file)
 # it under the terms of the GNU General Public License version 2
 #
 
-ifneq ($(OSD_INC),)
-# we are built out-of-tree Kconfigure everything as on
-
-CONFIG_SCSI_OSD_INITIATOR=m
-ccflags-y += -DCONFIG_SCSI_OSD_INITIATOR -DCONFIG_SCSI_OSD_INITIATOR_MODULE
-
-CONFIG_SCSI_OSD_ULD=m
-ccflags-y += -DCONFIG_SCSI_OSD_ULD -DCONFIG_SCSI_OSD_ULD_MODULE
-
-# CONFIG_SCSI_OSD_DPRINT_SENSE =
-#      0 - no print of errors
-#      1 - print errors
-#      2 - errors + warrnings
-ccflags-y += -DCONFIG_SCSI_OSD_DPRINT_SENSE=1
-
-# Uncomment to turn debug on
-# ccflags-y += -DCONFIG_SCSI_OSD_DEBUG
-
-# if we are built out-of-tree and the hosting kernel has OSD headers
-# then "ccflags-y +=" will not pick the out-off-tree headers. Only by doing
-# this it will work. This might break in future kernels
-LINUXINCLUDE := -I$(OSD_INC) $(LINUXINCLUDE)
-
-endif
-
 # libosd.ko - osd-initiator library
 libosd-y := osd_initiator.o
 obj-$(CONFIG_SCSI_OSD_INITIATOR) += libosd.o
diff --git a/drivers/scsi/osd/Makefile b/drivers/scsi/osd/Makefile
deleted file mode 100755 (executable)
index d905344..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-#
-# Makefile for the OSD modules (out of tree)
-#
-# Copyright (C) 2008 Panasas Inc.  All rights reserved.
-#
-# Authors:
-#   Boaz Harrosh <bharrosh@panasas.com>
-#   Benny Halevy <bhalevy@panasas.com>
-#
-# This program is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License version 2
-#
-# This Makefile is used to call the kernel Makefile in case of an out-of-tree
-# build.
-# $KSRC should point to a Kernel source tree otherwise host's default is
-# used. (eg. /lib/modules/`uname -r`/build)
-
-# include path for out-of-tree Headers
-OSD_INC ?= `pwd`/../../../include
-
-# allow users to override these
-# e.g. to compile for a kernel that you aren't currently running
-KSRC ?= /lib/modules/$(shell uname -r)/build
-KBUILD_OUTPUT ?=
-ARCH ?=
-V ?= 0
-
-# this is the basic Kbuild out-of-tree invocation, with the M= option
-KBUILD_BASE = +$(MAKE) -C $(KSRC) M=`pwd` KBUILD_OUTPUT=$(KBUILD_OUTPUT) ARCH=$(ARCH) V=$(V)
-
-all: libosd
-
-libosd: ;
-       $(KBUILD_BASE) OSD_INC=$(OSD_INC) modules
-
-clean:
-       $(KBUILD_BASE) clean
index 5776b2ab6b12a5fa54fe974b11e31619d8ee18e0..7a117c18114cd757de416045da2d453327742131 100644 (file)
@@ -118,39 +118,39 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
                _osd_ver_desc(or));
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_VENDOR_IDENTIFICATION [%s]\n",
+       OSD_INFO("VENDOR_IDENTIFICATION  [%s]\n",
                (char *)pFirst);
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_PRODUCT_IDENTIFICATION [%s]\n",
+       OSD_INFO("PRODUCT_IDENTIFICATION [%s]\n",
                (char *)pFirst);
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_PRODUCT_MODEL [%s]\n",
+       OSD_INFO("PRODUCT_MODEL          [%s]\n",
                (char *)pFirst);
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_PRODUCT_REVISION_LEVEL [%u]\n",
+       OSD_INFO("PRODUCT_REVISION_LEVEL [%u]\n",
                pFirst ? get_unaligned_be32(pFirst) : ~0U);
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER [%s]\n",
+       OSD_INFO("PRODUCT_SERIAL_NUMBER  [%s]\n",
                (char *)pFirst);
 
        pFirst = get_attrs[a].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_OSD_NAME [%s]\n", (char *)pFirst);
+       OSD_INFO("OSD_NAME               [%s]\n", (char *)pFirst);
        a++;
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_TOTAL_CAPACITY [0x%llx]\n",
+       OSD_INFO("TOTAL_CAPACITY         [0x%llx]\n",
                pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_USED_CAPACITY [0x%llx]\n",
+       OSD_INFO("USED_CAPACITY          [0x%llx]\n",
                pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_NUMBER_OF_PARTITIONS [%llu]\n",
+       OSD_INFO("NUMBER_OF_PARTITIONS   [%llu]\n",
                pFirst ? _LLU(get_unaligned_be64(pFirst)) : ~0ULL);
 
        if (a >= nelem)
@@ -158,7 +158,7 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
 
        /* FIXME: Where are the time utilities */
        pFirst = get_attrs[a++].val_ptr;
-       OSD_INFO("OSD_ATTR_RI_CLOCK [0x%02x%02x%02x%02x%02x%02x]\n",
+       OSD_INFO("CLOCK                  [0x%02x%02x%02x%02x%02x%02x]\n",
                ((char *)pFirst)[0], ((char *)pFirst)[1],
                ((char *)pFirst)[2], ((char *)pFirst)[3],
                ((char *)pFirst)[4], ((char *)pFirst)[5]);
@@ -169,7 +169,8 @@ static int _osd_print_system_info(struct osd_dev *od, void *caps)
 
                hex_dump_to_buffer(get_attrs[a].val_ptr, len, 32, 1,
                                   sid_dump, sizeof(sid_dump), true);
-               OSD_INFO("OSD_ATTR_RI_OSD_SYSTEM_ID(%d) [%s]\n", len, sid_dump);
+               OSD_INFO("OSD_SYSTEM_ID(%d)\n"
+                        "        [%s]\n", len, sid_dump);
                a++;
        }
 out:
@@ -669,7 +670,7 @@ static int _osd_req_list_objects(struct osd_request *or,
        __be16 action, const struct osd_obj_id *obj, osd_id initial_id,
        struct osd_obj_id_list *list, unsigned nelem)
 {
-       struct request_queue *q = or->osd_dev->scsi_device->request_queue;
+       struct request_queue *q = osd_request_queue(or->osd_dev);
        u64 len = nelem * sizeof(osd_id) + sizeof(*list);
        struct bio *bio;
 
@@ -778,16 +779,32 @@ EXPORT_SYMBOL(osd_req_remove_object);
 */
 
 void osd_req_write(struct osd_request *or,
-       const struct osd_obj_id *obj, struct bio *bio, u64 offset)
+       const struct osd_obj_id *obj, u64 offset,
+       struct bio *bio, u64 len)
 {
-       _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, bio->bi_size);
+       _osd_req_encode_common(or, OSD_ACT_WRITE, obj, offset, len);
        WARN_ON(or->out.bio || or->out.total_bytes);
-       bio->bi_rw |= (1 << BIO_RW);
+       WARN_ON(0 ==  bio_rw_flagged(bio, BIO_RW));
        or->out.bio = bio;
-       or->out.total_bytes = bio->bi_size;
+       or->out.total_bytes = len;
 }
 EXPORT_SYMBOL(osd_req_write);
 
+int osd_req_write_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
+{
+       struct request_queue *req_q = osd_request_queue(or->osd_dev);
+       struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+       osd_req_write(or, obj, offset, bio, len);
+       return 0;
+}
+EXPORT_SYMBOL(osd_req_write_kern);
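+
+/* Usage sketch (illustrative, not part of this patch): the _kern variant
+ * wraps a kernel buffer in a bio via bio_map_kern() so callers need not
+ * build one by hand:
+ *
+ *     struct osd_request *or = osd_start_request(od, GFP_KERNEL);
+ *     if (or && !osd_req_write_kern(or, &obj, 0, buf, len))
+ *             ... osd_finalize_request() and execute ...
+ */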
+
 /*TODO: void osd_req_append(struct osd_request *,
        const struct osd_obj_id *, struct bio *data_out); */
 /*TODO: void osd_req_create_write(struct osd_request *,
@@ -813,16 +830,31 @@ void osd_req_flush_object(struct osd_request *or,
 EXPORT_SYMBOL(osd_req_flush_object);
 
 void osd_req_read(struct osd_request *or,
-       const struct osd_obj_id *obj, struct bio *bio, u64 offset)
+       const struct osd_obj_id *obj, u64 offset,
+       struct bio *bio, u64 len)
 {
-       _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, bio->bi_size);
+       _osd_req_encode_common(or, OSD_ACT_READ, obj, offset, len);
        WARN_ON(or->in.bio || or->in.total_bytes);
-       bio->bi_rw &= ~(1 << BIO_RW);
+       WARN_ON(1 == bio_rw_flagged(bio, BIO_RW));
        or->in.bio = bio;
-       or->in.total_bytes = bio->bi_size;
+       or->in.total_bytes = len;
 }
 EXPORT_SYMBOL(osd_req_read);
 
+int osd_req_read_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
+{
+       struct request_queue *req_q = osd_request_queue(or->osd_dev);
+       struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
+
+       if (IS_ERR(bio))
+               return PTR_ERR(bio);
+
+       osd_req_read(or, obj, offset, bio, len);
+       return 0;
+}
+EXPORT_SYMBOL(osd_req_read_kern);
+
 void osd_req_get_attributes(struct osd_request *or,
        const struct osd_obj_id *obj)
 {
@@ -1213,7 +1245,7 @@ static inline void osd_sec_parms_set_in_offset(bool is_v1,
 }
 
 static int _osd_req_finalize_data_integrity(struct osd_request *or,
-       bool has_in, bool has_out, const u8 *cap_key)
+       bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
 {
        struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
        int ret;
@@ -1228,8 +1260,7 @@ static int _osd_req_finalize_data_integrity(struct osd_request *or,
                };
                unsigned pad;
 
-               or->out_data_integ.data_bytes = cpu_to_be64(
-                       or->out.bio ? or->out.bio->bi_size : 0);
+               or->out_data_integ.data_bytes = cpu_to_be64(out_data_bytes);
                or->out_data_integ.set_attributes_bytes = cpu_to_be64(
                        or->set_attr.total_bytes);
                or->out_data_integ.get_attributes_bytes = cpu_to_be64(
@@ -1306,6 +1337,8 @@ static int _init_blk_request(struct osd_request *or,
 
        or->request = req;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
+       req->cmd_flags |= REQ_QUIET;
+
        req->timeout = or->timeout;
        req->retries = or->retries;
        req->sense = or->sense;
@@ -1339,6 +1372,7 @@ int osd_finalize_request(struct osd_request *or,
 {
        struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
        bool has_in, has_out;
+       u64 out_data_bytes = or->out.total_bytes;
        int ret;
 
        if (options & OSD_REQ_FUA)
@@ -1388,7 +1422,8 @@ int osd_finalize_request(struct osd_request *or,
                }
        }
 
-       ret = _osd_req_finalize_data_integrity(or, has_in, has_out, cap_key);
+       ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
+                                              out_data_bytes, cap_key);
        if (ret)
                return ret;
 
index 22b59e13ba83fd7d91b71c6b78f7b945291ab5da..0bdef339090288d409e23557bf3c0956f400f241 100644 (file)
@@ -49,6 +49,7 @@
 #include <linux/device.h>
 #include <linux/idr.h>
 #include <linux/major.h>
+#include <linux/file.h>
 
 #include <scsi/scsi.h>
 #include <scsi/scsi_driver.h>
@@ -175,10 +176,9 @@ static const struct file_operations osd_fops = {
 
 struct osd_dev *osduld_path_lookup(const char *name)
 {
-       struct path path;
-       struct inode *inode;
-       struct cdev *cdev;
-       struct osd_uld_device *uninitialized_var(oud);
+       struct osd_uld_device *oud;
+       struct osd_dev *od;
+       struct file *file;
        int error;
 
        if (!name || !*name) {
@@ -186,52 +186,46 @@ struct osd_dev *osduld_path_lookup(const char *name)
                return ERR_PTR(-EINVAL);
        }
 
-       error = kern_path(name, LOOKUP_FOLLOW, &path);
-       if (error) {
-               OSD_ERR("path_lookup of %s failed=>%d\n", name, error);
-               return ERR_PTR(error);
-       }
+       od = kzalloc(sizeof(*od), GFP_KERNEL);
+       if (!od)
+               return ERR_PTR(-ENOMEM);
 
-       inode = path.dentry->d_inode;
-       error = -EINVAL; /* Not the right device e.g osd_uld_device */
-       if (!S_ISCHR(inode->i_mode)) {
-               OSD_DEBUG("!S_ISCHR()\n");
-               goto out;
+       file = filp_open(name, O_RDWR, 0);
+       if (IS_ERR(file)) {
+               error = PTR_ERR(file);
+               goto free_od;
        }
 
-       cdev = inode->i_cdev;
-       if (!cdev) {
-               OSD_ERR("Before mounting an OSD Based filesystem\n");
-               OSD_ERR("  user-mode must open+close the %s device\n", name);
-               OSD_ERR("  Example: bash: echo < %s\n", name);
-               goto out;
+       if (file->f_op != &osd_fops) {
+               error = -EINVAL;
+               goto close_file;
        }
 
-       /* The Magic wand. Is it our char-dev */
-       /* TODO: Support sg devices */
-       if (cdev->owner != THIS_MODULE) {
-               OSD_ERR("Error mounting %s - is not an OSD device\n", name);
-               goto out;
-       }
+       oud = file->private_data;
 
-       oud = container_of(cdev, struct osd_uld_device, cdev);
+       *od = oud->od;
+       od->file = file;
 
-       __uld_get(oud);
-       error = 0;
+       return od;
 
-out:
-       path_put(&path);
-       return error ? ERR_PTR(error) : &oud->od;
+close_file:
+       fput(file);
+free_od:
+       kfree(od);
+       return ERR_PTR(error);
 }
 EXPORT_SYMBOL(osduld_path_lookup);
 
 void osduld_put_device(struct osd_dev *od)
 {
-       if (od) {
-               struct osd_uld_device *oud = container_of(od,
-                                               struct osd_uld_device, od);
 
-               __uld_put(oud);
+       if (od && !IS_ERR(od)) {
+               struct osd_uld_device *oud = od->file->private_data;
+
+               BUG_ON(od->scsi_device != oud->od.scsi_device);
+
+               fput(od->file);
+               kfree(od);
        }
 }
 EXPORT_SYMBOL(osduld_put_device);
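
The rewrite above replaces the kern_path()/cdev walk with filp_open(): opening the node and comparing file->f_op against the driver's own fops both validates that the path names one of our devices and pins the module for as long as the file stays open. A self-contained sketch of the same pattern (my_fops and struct my_dev are illustrative stand-ins, not osd_uld symbols):

    #include <linux/err.h>
    #include <linux/file.h>         /* fput() */
    #include <linux/fs.h>           /* filp_open() */

    static const struct file_operations my_fops;    /* stub: the driver's fops */
    struct my_dev;                                  /* driver private type */

    static struct my_dev *my_lookup(const char *name)
    {
            struct file *file = filp_open(name, O_RDWR, 0);

            if (IS_ERR(file))
                    return ERR_CAST(file);          /* propagate open error */

            if (file->f_op != &my_fops) {           /* not one of our nodes */
                    fput(file);
                    return ERR_PTR(-EINVAL);
            }
            /* the driver's ->open() stored its object in private_data */
            return file->private_data;
    }

The open file also gives put_device a natural teardown: fput() drops the reference that the lookup took.
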
index 5defe5ea5eda64937f605ebd6dceae3d6958c76f..8371d917a9a2408242a2439c9c69f298cc012964 100644 (file)
 * General Public License for more details.
 *
 ******************************************************************************/
-#define QLA1280_VERSION      "3.26"
+#define QLA1280_VERSION      "3.27"
 /*****************************************************************************
     Revision History:
+    Rev  3.27, February 10, 2009, Michael Reed
+       - General code cleanup.
+       - Improve error recovery.
     Rev  3.26, January 16, 2006 Jes Sorensen
        - Ditch all < 2.6 support
     Rev  3.25.1, February 10, 2005 Christoph Hellwig
@@ -435,7 +438,6 @@ static int qla1280_mailbox_command(struct scsi_qla_host *,
                                   uint8_t, uint16_t *);
 static int qla1280_bus_reset(struct scsi_qla_host *, int);
 static int qla1280_device_reset(struct scsi_qla_host *, int, int);
-static int qla1280_abort_device(struct scsi_qla_host *, int, int, int);
 static int qla1280_abort_command(struct scsi_qla_host *, struct srb *, int);
 static int qla1280_abort_isp(struct scsi_qla_host *);
 #ifdef QLA_64BIT_PTR
@@ -698,7 +700,7 @@ qla1280_info(struct Scsi_Host *host)
 }
 
 /**************************************************************************
- *   qla1200_queuecommand
+ *   qla1280_queuecommand
  *     Queue a command to the controller.
  *
  * Note:
@@ -713,12 +715,14 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
 {
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_qla_host *ha = (struct scsi_qla_host *)host->hostdata;
-       struct srb *sp = (struct srb *)&cmd->SCp;
+       struct srb *sp = (struct srb *)CMD_SP(cmd);
        int status;
 
        cmd->scsi_done = fn;
        sp->cmd = cmd;
        sp->flags = 0;
+       sp->wait = NULL;
+       CMD_HANDLE(cmd) = (unsigned char *)NULL;
 
        qla1280_print_scsi_cmd(5, cmd);
 
@@ -738,21 +742,11 @@ qla1280_queuecommand(struct scsi_cmnd *cmd, void (*fn)(struct scsi_cmnd *))
 
 enum action {
        ABORT_COMMAND,
-       ABORT_DEVICE,
        DEVICE_RESET,
        BUS_RESET,
        ADAPTER_RESET,
-       FAIL
 };
 
-/* timer action for error action processor */
-static void qla1280_error_wait_timeout(unsigned long __data)
-{
-       struct scsi_cmnd *cmd = (struct scsi_cmnd *)__data;
-       struct srb *sp = (struct srb *)CMD_SP(cmd);
-
-       complete(sp->wait);
-}
 
 static void qla1280_mailbox_timeout(unsigned long __data)
 {
@@ -767,8 +761,67 @@ static void qla1280_mailbox_timeout(unsigned long __data)
        complete(ha->mailbox_wait);
 }
 
+static int
+_qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp,
+                                struct completion *wait)
+{
+       int     status = FAILED;
+       struct scsi_cmnd *cmd = sp->cmd;
+
+       spin_unlock_irq(ha->host->host_lock);
+       wait_for_completion_timeout(wait, 4*HZ);
+       spin_lock_irq(ha->host->host_lock);
+       sp->wait = NULL;
+       if (CMD_HANDLE(cmd) == COMPLETED_HANDLE) {
+               status = SUCCESS;
+               (*cmd->scsi_done)(cmd);
+       }
+       return status;
+}
+
+static int
+qla1280_wait_for_single_command(struct scsi_qla_host *ha, struct srb *sp)
+{
+       DECLARE_COMPLETION_ONSTACK(wait);
+
+       sp->wait = &wait;
+       return _qla1280_wait_for_single_command(ha, sp, &wait);
+}
+
+static int
+qla1280_wait_for_pending_commands(struct scsi_qla_host *ha, int bus, int target)
+{
+       int             cnt;
+       int             status;
+       struct srb      *sp;
+       struct scsi_cmnd *cmd;
+
+       status = SUCCESS;
+
+       /*
+        * Wait for all commands with the designated bus/target
+        * to be completed by the firmware
+        */
+       for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               sp = ha->outstanding_cmds[cnt];
+               if (sp) {
+                       cmd = sp->cmd;
+
+                       if (bus >= 0 && SCSI_BUS_32(cmd) != bus)
+                               continue;
+                       if (target >= 0 && SCSI_TCN_32(cmd) != target)
+                               continue;
+
+                       status = qla1280_wait_for_single_command(ha, sp);
+                       if (status == FAILED)
+                               break;
+               }
+       }
+       return status;
+}
+
 /**************************************************************************
- * qla1200_error_action
+ * qla1280_error_action
  *    The function will attempt to perform a specified error action and
  *    wait for the results (or time out).
  *
@@ -780,11 +833,6 @@ static void qla1280_mailbox_timeout(unsigned long __data)
  * Returns:
  *      SUCCESS or FAILED
  *
- * Note:
- *      Resetting the bus always succeeds - is has to, otherwise the
- *      kernel will panic! Try a surgical technique - sending a BUS
- *      DEVICE RESET message - on the offending target before pulling
- *      the SCSI bus reset line.
  **************************************************************************/
 static int
 qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
@@ -792,13 +840,19 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
        struct scsi_qla_host *ha;
        int bus, target, lun;
        struct srb *sp;
-       uint16_t data;
-       unsigned char *handle;
-       int result, i;
+       int i, found;
+       int result = FAILED;
+       int wait_for_bus = -1;
+       int wait_for_target = -1;
        DECLARE_COMPLETION_ONSTACK(wait);
-       struct timer_list timer;
+
+       ENTER("qla1280_error_action");
 
        ha = (struct scsi_qla_host *)(CMD_HOST(cmd)->hostdata);
+       sp = (struct srb *)CMD_SP(cmd);
+       bus = SCSI_BUS_32(cmd);
+       target = SCSI_TCN_32(cmd);
+       lun = SCSI_LUN_32(cmd);
 
        dprintk(4, "error_action %i, istatus 0x%04x\n", action,
                RD_REG_WORD(&ha->iobase->istatus));
@@ -807,99 +861,47 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
                RD_REG_WORD(&ha->iobase->host_cmd),
                RD_REG_WORD(&ha->iobase->ictrl), jiffies);
 
-       ENTER("qla1280_error_action");
        if (qla1280_verbose)
                printk(KERN_INFO "scsi(%li): Resetting Cmnd=0x%p, "
                       "Handle=0x%p, action=0x%x\n",
                       ha->host_no, cmd, CMD_HANDLE(cmd), action);
 
-       if (cmd == NULL) {
-               printk(KERN_WARNING "(scsi?:?:?:?) Reset called with NULL "
-                      "si_Cmnd pointer, failing.\n");
-               LEAVE("qla1280_error_action");
-               return FAILED;
-       }
-
-       ha = (struct scsi_qla_host *)cmd->device->host->hostdata;
-       sp = (struct srb *)CMD_SP(cmd);
-       handle = CMD_HANDLE(cmd);
-
-       /* Check for pending interrupts. */
-       data = qla1280_debounce_register(&ha->iobase->istatus);
-       /*
-        * The io_request_lock is held when the reset handler is called, hence
-        * the interrupt handler cannot be running in parallel as it also
-        * grabs the lock. /Jes
-        */
-       if (data & RISC_INT)
-               qla1280_isr(ha, &ha->done_q);
-
        /*
-        * Determine the suggested action that the mid-level driver wants
-        * us to perform.
+        * Check to see if we have the command in the outstanding_cmds[]
+        * array.  If not then it must have completed before this error
+        * action was initiated.  If the error_action isn't ABORT_COMMAND
+        * then the driver must proceed with the requested action.
         */
-       if (handle == (unsigned char *)INVALID_HANDLE || handle == NULL) {
-               if(action == ABORT_COMMAND) {
-                       /* we never got this command */
-                       printk(KERN_INFO "qla1280: Aborting a NULL handle\n");
-                       return SUCCESS; /* no action - we don't have command */
+       found = -1;
+       for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
+               if (sp == ha->outstanding_cmds[i]) {
+                       found = i;
+                       sp->wait = &wait; /* we'll wait for it to complete */
+                       break;
                }
-       } else {
-               sp->wait = &wait;
        }
 
-       bus = SCSI_BUS_32(cmd);
-       target = SCSI_TCN_32(cmd);
-       lun = SCSI_LUN_32(cmd);
+       if (found < 0) {        /* driver doesn't have command */
+               result = SUCCESS;
+               if (qla1280_verbose) {
+                       printk(KERN_INFO
+                              "scsi(%ld:%d:%d:%d): specified command has "
+                              "already completed.\n", ha->host_no, bus,
+                               target, lun);
+               }
+       }
 
-       /* Overloading result.  Here it means the success or fail of the
-        * *issue* of the action.  When we return from the routine, it must
-        * mean the actual success or fail of the action */
-       result = FAILED;
        switch (action) {
-       case FAIL:
-               break;
 
        case ABORT_COMMAND:
-               if ((sp->flags & SRB_ABORT_PENDING)) {
-                       printk(KERN_WARNING
-                              "scsi(): Command has a pending abort "
-                              "message - ABORT_PENDING.\n");
-                       /* This should technically be impossible since we
-                        * now wait for abort completion */
-                       break;
-               }
-
-               for (i = 0; i < MAX_OUTSTANDING_COMMANDS; i++) {
-                       if (sp == ha->outstanding_cmds[i]) {
-                               dprintk(1, "qla1280: RISC aborting command\n");
-                               if (qla1280_abort_command(ha, sp, i) == 0)
-                                       result = SUCCESS;
-                               else {
-                                       /*
-                                        * Since we don't know what might
-                                        * have happend to the command, it
-                                        * is unsafe to remove it from the
-                                        * device's queue at this point.
-                                        * Wait and let the escalation
-                                        * process take care of it.
-                                        */
-                                       printk(KERN_WARNING
-                                              "scsi(%li:%i:%i:%i): Unable"
-                                              " to abort command!\n",
-                                              ha->host_no, bus, target, lun);
-                               }
-                       }
-               }
-               break;
-
-       case ABORT_DEVICE:
-               if (qla1280_verbose)
-                       printk(KERN_INFO
-                              "scsi(%ld:%d:%d:%d): Queueing abort device "
-                              "command.\n", ha->host_no, bus, target, lun);
-               if (qla1280_abort_device(ha, bus, target, lun) == 0)
-                       result = SUCCESS;
+               dprintk(1, "qla1280: RISC aborting command\n");
+               /*
+        * The abort might fail due to a race when the host_lock
+                * is released to issue the abort.  As such, we
+                * don't bother to check the return status.
+                */
+               if (found >= 0)
+                       qla1280_abort_command(ha, sp, found);
                break;
 
        case DEVICE_RESET:
@@ -907,16 +909,21 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
                        printk(KERN_INFO
                               "scsi(%ld:%d:%d:%d): Queueing device reset "
                               "command.\n", ha->host_no, bus, target, lun);
-               if (qla1280_device_reset(ha, bus, target) == 0)
-                       result = SUCCESS;
+               if (qla1280_device_reset(ha, bus, target) == 0) {
+                       /* issued device reset, set wait conditions */
+                       wait_for_bus = bus;
+                       wait_for_target = target;
+               }
                break;
 
        case BUS_RESET:
                if (qla1280_verbose)
                        printk(KERN_INFO "qla1280(%ld:%d): Issued bus "
                               "reset.\n", ha->host_no, bus);
-               if (qla1280_bus_reset(ha, bus) == 0)
-                       result = SUCCESS;
+               if (qla1280_bus_reset(ha, bus) == 0) {
+                       /* issued bus reset, set wait conditions */
+                       wait_for_bus = bus;
+               }
                break;
 
        case ADAPTER_RESET:
@@ -929,55 +936,48 @@ qla1280_error_action(struct scsi_cmnd *cmd, enum action action)
                               "continue automatically\n", ha->host_no);
                }
                ha->flags.reset_active = 1;
-               /*
-                * We restarted all of the commands automatically, so the
-                * mid-level code can expect completions momentitarily.
-                */
-               if (qla1280_abort_isp(ha) == 0)
-                       result = SUCCESS;
+
+               if (qla1280_abort_isp(ha) != 0) {       /* it's dead */
+                       result = FAILED;
+               }
 
                ha->flags.reset_active = 0;
        }
 
-       if (!list_empty(&ha->done_q))
-               qla1280_done(ha);
-
-       /* If we didn't manage to issue the action, or we have no
-        * command to wait for, exit here */
-       if (result == FAILED || handle == NULL ||
-           handle == (unsigned char *)INVALID_HANDLE) {
-               /*
-                * Clear completion queue to avoid qla1280_done() trying
-                * to complete the command at a later stage after we
-                * have exited the current context
-                */
-               sp->wait = NULL;
-               goto leave;
-       }
+       /*
+        * At this point, the host_lock has been released and retaken
+        * by the issuance of the mailbox command.
+        * Wait for the command passed in by the mid-layer if it
+        * was found by the driver.  It might have been returned
+        * between eh recovery steps, hence the check of the "found"
+        * variable.
+        */
 
-       /* set up a timer just in case we're really jammed */
-       init_timer(&timer);
-       timer.expires = jiffies + 4*HZ;
-       timer.data = (unsigned long)cmd;
-       timer.function = qla1280_error_wait_timeout;
-       add_timer(&timer);
+       if (found >= 0)
+               result = _qla1280_wait_for_single_command(ha, sp, &wait);
 
-       /* wait for the action to complete (or the timer to expire) */
-       spin_unlock_irq(ha->host->host_lock);
-       wait_for_completion(&wait);
-       del_timer_sync(&timer);
-       spin_lock_irq(ha->host->host_lock);
-       sp->wait = NULL;
+       if (action == ABORT_COMMAND && result != SUCCESS) {
+               printk(KERN_WARNING
+                      "scsi(%li:%i:%i:%i): "
+                      "Unable to abort command!\n",
+                      ha->host_no, bus, target, lun);
+       }
 
-       /* the only action we might get a fail for is abort */
-       if (action == ABORT_COMMAND) {
-               if(sp->flags & SRB_ABORTED)
-                       result = SUCCESS;
-               else
-                       result = FAILED;
+       /*
+        * If the command passed in by the mid-layer has been
+        * returned by the board, then wait for any additional
+        * commands which are supposed to complete based upon
+        * the error action.
+        *
+        * All commands are unconditionally returned during a
+        * call to qla1280_abort_isp(), ADAPTER_RESET.  No need
+        * to wait for them.
+        */
+       if (result == SUCCESS && wait_for_bus >= 0) {
+               result = qla1280_wait_for_pending_commands(ha,
+                                       wait_for_bus, wait_for_target);
        }
 
- leave:
        dprintk(1, "RESET returning %d\n", result);
 
        LEAVE("qla1280_error_action");
@@ -1280,13 +1280,12 @@ qla1280_done(struct scsi_qla_host *ha)
                switch ((CMD_RESULT(cmd) >> 16)) {
                case DID_RESET:
                        /* Issue marker command. */
-                       qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
+                       if (!ha->flags.abort_isp_active)
+                               qla1280_marker(ha, bus, target, 0, MK_SYNC_ID);
                        break;
                case DID_ABORT:
                        sp->flags &= ~SRB_ABORT_PENDING;
                        sp->flags |= SRB_ABORTED;
-                       if (sp->flags & SRB_TIMEOUT)
-                               CMD_RESULT(sp->cmd) = DID_TIME_OUT << 16;
                        break;
                default:
                        break;
@@ -1296,12 +1295,11 @@ qla1280_done(struct scsi_qla_host *ha)
                scsi_dma_unmap(cmd);
 
                /* Call the mid-level driver interrupt handler */
-               CMD_HANDLE(sp->cmd) = (unsigned char *)INVALID_HANDLE;
                ha->actthreads--;
 
-               (*(cmd)->scsi_done)(cmd);
-
-               if(sp->wait != NULL)
+               if (sp->wait == NULL)
+                       (*(cmd)->scsi_done)(cmd);
+               else
                        complete(sp->wait);
        }
        LEAVE("qla1280_done");
@@ -2417,9 +2415,6 @@ static int
 qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
 {
        struct device_reg __iomem *reg = ha->iobase;
-#if 0
-       LIST_HEAD(done_q);
-#endif
        int status = 0;
        int cnt;
        uint16_t *optr, *iptr;
@@ -2493,19 +2488,9 @@ qla1280_mailbox_command(struct scsi_qla_host *ha, uint8_t mr, uint16_t *mb)
        mr = MAILBOX_REGISTER_COUNT;
        memcpy(optr, iptr, MAILBOX_REGISTER_COUNT * sizeof(uint16_t));
 
-#if 0
-       /* Go check for any response interrupts pending. */
-       qla1280_isr(ha, &done_q);
-#endif
-
        if (ha->flags.reset_marker)
                qla1280_rst_aen(ha);
 
-#if 0
-       if (!list_empty(&done_q))
-               qla1280_done(ha, &done_q);
-#endif
-
        if (status)
                dprintk(2, "qla1280_mailbox_command: **** FAILED, mailbox0 = "
                        "0x%x ****\n", mb[0]);
@@ -2640,41 +2625,6 @@ qla1280_device_reset(struct scsi_qla_host *ha, int bus, int target)
        return status;
 }
 
-/*
- * qla1280_abort_device
- *      Issue an abort message to the device
- *
- * Input:
- *      ha     = adapter block pointer.
- *      bus    = SCSI BUS.
- *      target = SCSI ID.
- *      lun    = SCSI LUN.
- *
- * Returns:
- *      0 = success
- */
-static int
-qla1280_abort_device(struct scsi_qla_host *ha, int bus, int target, int lun)
-{
-       uint16_t mb[MAILBOX_REGISTER_COUNT];
-       int status;
-
-       ENTER("qla1280_abort_device");
-
-       mb[0] = MBC_ABORT_DEVICE;
-       mb[1] = (bus ? target | BIT_7 : target) << 8 | lun;
-       status = qla1280_mailbox_command(ha, BIT_1 | BIT_0, &mb[0]);
-
-       /* Issue marker command. */
-       qla1280_marker(ha, bus, target, lun, MK_SYNC_ID_LUN);
-
-       if (status)
-               dprintk(2, "qla1280_abort_device: **** FAILED ****\n");
-
-       LEAVE("qla1280_abort_device");
-       return status;
-}
-
 /*
  * qla1280_abort_command
  *      Abort command aborts a specified IOCB.
@@ -2833,7 +2783,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
 
        /* If room for request in request ring. */
        if ((req_cnt + 2) >= ha->req_q_cnt) {
-               status = 1;
+               status = SCSI_MLQUEUE_HOST_BUSY;
                dprintk(2, "qla1280_start_scsi: in-ptr=0x%x  req_q_cnt="
                        "0x%xreq_cnt=0x%x", ha->req_ring_index, ha->req_q_cnt,
                        req_cnt);
@@ -2845,7 +2795,7 @@ qla1280_64bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                     ha->outstanding_cmds[cnt] != NULL; cnt++);
 
        if (cnt >= MAX_OUTSTANDING_COMMANDS) {
-               status = 1;
+               status = SCSI_MLQUEUE_HOST_BUSY;
                dprintk(2, "qla1280_start_scsi: NO ROOM IN "
                        "OUTSTANDING ARRAY, req_q_cnt=0x%x", ha->req_q_cnt);
                goto out;
@@ -3108,7 +3058,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                ha->req_q_cnt, seg_cnt);
        /* If room for request in request ring. */
        if ((req_cnt + 2) >= ha->req_q_cnt) {
-               status = 1;
+               status = SCSI_MLQUEUE_HOST_BUSY;
                dprintk(2, "qla1280_32bit_start_scsi: in-ptr=0x%x, "
                        "req_q_cnt=0x%x, req_cnt=0x%x", ha->req_ring_index,
                        ha->req_q_cnt, req_cnt);
@@ -3120,7 +3070,7 @@ qla1280_32bit_start_scsi(struct scsi_qla_host *ha, struct srb * sp)
                     (ha->outstanding_cmds[cnt] != 0); cnt++) ;
 
        if (cnt >= MAX_OUTSTANDING_COMMANDS) {
-               status = 1;
+               status = SCSI_MLQUEUE_HOST_BUSY;
                dprintk(2, "qla1280_32bit_start_scsi: NO ROOM IN OUTSTANDING "
                        "ARRAY, req_q_cnt=0x%x\n", ha->req_q_cnt);
                goto out;
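
Both start_scsi hunks above swap a bare status = 1 for SCSI_MLQUEUE_HOST_BUSY when the request ring or the outstanding-command array is full. That value propagates out of queuecommand and tells the SCSI mid-layer to requeue the command and retry later, instead of treating a full ring as a failure. In sketch form (ring_has_room() is a hypothetical stand-in for the two capacity checks above):

    /* Sketch of the requeue contract, not driver source. */
    static bool ring_has_room(void);            /* hypothetical capacity check */

    static int queuecommand_sketch(struct scsi_cmnd *cmd,
                                   void (*done)(struct scsi_cmnd *))
    {
            if (!ring_has_room())
                    return SCSI_MLQUEUE_HOST_BUSY;  /* mid-layer requeues */

            /* ...map DMA, build the IOCB, ring the doorbell... */
            return 0;                               /* command accepted */
    }
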
@@ -3487,6 +3437,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
 
                                        /* Save ISP completion status */
                                        CMD_RESULT(sp->cmd) = 0;
+                                       CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
 
                                        /* Place block on done queue */
                                        list_add_tail(&sp->list, done_q);
@@ -3495,7 +3446,7 @@ qla1280_isr(struct scsi_qla_host *ha, struct list_head *done_q)
                                         * If we get here we have a real problem!
                                         */
                                        printk(KERN_WARNING
-                                              "qla1280: ISP invalid handle");
+                                              "qla1280: ISP invalid handle\n");
                                }
                        }
                        break;
@@ -3753,6 +3704,8 @@ qla1280_status_entry(struct scsi_qla_host *ha, struct response *pkt,
                }
        }
 
+       CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
        /* Place command on done queue. */
        list_add_tail(&sp->list, done_q);
  out:
@@ -3808,6 +3761,8 @@ qla1280_error_entry(struct scsi_qla_host *ha, struct response *pkt,
                        CMD_RESULT(sp->cmd) = DID_ERROR << 16;
                }
 
+               CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;
+
                /* Place command on done queue. */
                list_add_tail(&sp->list, done_q);
        }
@@ -3858,19 +3813,16 @@ qla1280_abort_isp(struct scsi_qla_host *ha)
                struct scsi_cmnd *cmd;
                sp = ha->outstanding_cmds[cnt];
                if (sp) {
-
                        cmd = sp->cmd;
                        CMD_RESULT(cmd) = DID_RESET << 16;
-
-                       sp->cmd = NULL;
+                       CMD_HANDLE(cmd) = COMPLETED_HANDLE;
                        ha->outstanding_cmds[cnt] = NULL;
-
-                       (*cmd->scsi_done)(cmd);
-
-                       sp->flags = 0;
+                       list_add_tail(&sp->list, &ha->done_q);
                }
        }
 
+       qla1280_done(ha);
+
        status = qla1280_load_firmware(ha);
        if (status)
                goto out;
@@ -3955,13 +3907,6 @@ qla1280_check_for_dead_scsi_bus(struct scsi_qla_host *ha, unsigned int bus)
 
                if (scsi_control == SCSI_PHASE_INVALID) {
                        ha->bus_settings[bus].scsi_bus_dead = 1;
-#if 0
-                       CMD_RESULT(cp) = DID_NO_CONNECT << 16;
-                       CMD_HANDLE(cp) = INVALID_HANDLE;
-                       /* ha->actthreads--; */
-
-                       (*(cp)->scsi_done)(cp);
-#endif
                        return 1;       /* bus is dead */
                } else {
                        ha->bus_settings[bus].scsi_bus_dead = 0;
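
The core of the qla1280 error-handler rework above is a completion handshake between the eh thread and the completion path: the waiter parks a struct completion in the srb, drops the host lock so the ISR can make progress, waits with a 4-second timeout, then checks whether a completion path stamped the command. qla1280_done() cooperates by signalling the waiter instead of calling scsi_done() itself. Reassembled from the hunks above into one sketch (types and macros are the driver's; this is not a verbatim extract):

    static int wait_for_srb(struct scsi_qla_host *ha, struct srb *sp)
    {
            DECLARE_COMPLETION_ONSTACK(wait);
            int status = FAILED;

            sp->wait = &wait;                       /* done path will signal us */
            spin_unlock_irq(ha->host->host_lock);   /* let the ISR run */
            wait_for_completion_timeout(&wait, 4 * HZ);
            spin_lock_irq(ha->host->host_lock);
            sp->wait = NULL;

            if (CMD_HANDLE(sp->cmd) == COMPLETED_HANDLE) {
                    status = SUCCESS;
                    (*sp->cmd->scsi_done)(sp->cmd); /* deferred from done path */
            }
            return status;
    }

    /* ...while qla1280_done() completes the waiter instead of the command: */
    if (sp->wait == NULL)
            (*cmd->scsi_done)(cmd);
    else
            complete(sp->wait);

Since sp->wait is cleared under the host lock after the timeout, a late completion should not be able to signal the stale on-stack completion.
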
index d7c44b8d2b4f5c9ef9c92c5bc8bb4a3029afc095..834884b9eed5b579e62a622ce4725c80ad897a7b 100644 (file)
@@ -88,7 +88,8 @@
 
 /* Maximum outstanding commands in ISP queues */
 #define MAX_OUTSTANDING_COMMANDS       512
-#define INVALID_HANDLE                 (MAX_OUTSTANDING_COMMANDS + 2)
+#define COMPLETED_HANDLE               ((unsigned char *) \
+                                       (MAX_OUTSTANDING_COMMANDS + 2))
 
 /* ISP request and response entry counts (37-65535) */
 #define REQUEST_ENTRY_CNT              255 /* Number of request entries. */
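
COMPLETED_HANDLE replaces the deleted integer INVALID_HANDLE with a pointer-typed sentinel: the value (MAX_OUTSTANDING_COMMANDS + 2, i.e. 514) can never collide with NULL or with a live handle, so CMD_HANDLE() becomes roughly a three-state field — NULL while queued, a handle while the firmware owns the command, the sentinel once completed. Both sides appear in the qla1280.c hunks above:

    /* completion paths (ISR, status/error entries, abort_isp) stamp it: */
    CMD_HANDLE(sp->cmd) = COMPLETED_HANDLE;

    /* the error-handler wait then tests for it: */
    if (CMD_HANDLE(cmd) == COMPLETED_HANDLE)
            status = SUCCESS;
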
index b09993a06576cfbf37ead3608b53d5256bf084f4..0f8796201504c25a9f736f38f4ec8094b2578640 100644 (file)
@@ -97,7 +97,7 @@ qla2x00_sysfs_read_nvram(struct kobject *kobj,
                return 0;
 
        if (IS_NOCACHE_VPD_TYPE(ha))
-               ha->isp_ops->read_optrom(vha, ha->vpd, ha->flt_region_nvram << 2,
+               ha->isp_ops->read_optrom(vha, ha->nvram, ha->flt_region_nvram << 2,
                    ha->nvram_size);
        return memory_read_from_buffer(buf, count, &off, ha->nvram,
                                        ha->nvram_size);
@@ -692,6 +692,109 @@ static struct bin_attribute sysfs_edc_status_attr = {
        .read = qla2x00_sysfs_read_edc_status,
 };
 
+static ssize_t
+qla2x00_sysfs_read_xgmac_stats(struct kobject *kobj,
+                      struct bin_attribute *bin_attr,
+                      char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+       int rval;
+       uint16_t actual_size;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count > XGMAC_DATA_SIZE)
+               return 0;
+
+       if (ha->xgmac_data)
+               goto do_read;
+
+       ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+           &ha->xgmac_data_dma, GFP_KERNEL);
+       if (!ha->xgmac_data) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate memory for XGMAC read-data.\n");
+               return 0;
+       }
+
+do_read:
+       actual_size = 0;
+       memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
+
+       rval = qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
+           XGMAC_DATA_SIZE, &actual_size);
+       if (rval != QLA_SUCCESS) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to read XGMAC data (%x).\n", rval);
+               count = 0;
+       }
+
+       count = actual_size > count ? count : actual_size;
+       memcpy(buf, ha->xgmac_data, count);
+
+       return count;
+}
+
+static struct bin_attribute sysfs_xgmac_stats_attr = {
+       .attr = {
+               .name = "xgmac_stats",
+               .mode = S_IRUSR,
+       },
+       .size = 0,
+       .read = qla2x00_sysfs_read_xgmac_stats,
+};
+
+static ssize_t
+qla2x00_sysfs_read_dcbx_tlv(struct kobject *kobj,
+                      struct bin_attribute *bin_attr,
+                      char *buf, loff_t off, size_t count)
+{
+       struct scsi_qla_host *vha = shost_priv(dev_to_shost(container_of(kobj,
+           struct device, kobj)));
+       struct qla_hw_data *ha = vha->hw;
+       int rval;
+       uint16_t actual_size;
+
+       if (!capable(CAP_SYS_ADMIN) || off != 0 || count > DCBX_TLV_DATA_SIZE)
+               return 0;
+
+       if (ha->dcbx_tlv)
+               goto do_read;
+
+       ha->dcbx_tlv = dma_alloc_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+           &ha->dcbx_tlv_dma, GFP_KERNEL);
+       if (!ha->dcbx_tlv) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to allocate memory for DCBX TLV read-data.\n");
+               return 0;
+       }
+
+do_read:
+       actual_size = 0;
+       memset(ha->dcbx_tlv, 0, DCBX_TLV_DATA_SIZE);
+
+       rval = qla2x00_get_dcbx_params(vha, ha->dcbx_tlv_dma,
+           DCBX_TLV_DATA_SIZE);
+       if (rval != QLA_SUCCESS) {
+               qla_printk(KERN_WARNING, ha,
+                   "Unable to read DCBX TLV data (%x).\n", rval);
+               count = 0;
+       }
+
+       memcpy(buf, ha->dcbx_tlv, count);
+
+       return count;
+}
+
+static struct bin_attribute sysfs_dcbx_tlv_attr = {
+       .attr = {
+               .name = "dcbx_tlv",
+               .mode = S_IRUSR,
+       },
+       .size = 0,
+       .read = qla2x00_sysfs_read_dcbx_tlv,
+};
+
 static struct sysfs_entry {
        char *name;
        struct bin_attribute *attr;
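
The two new bin-attribute readers above share one shape: allocate a DMA-coherent buffer on first access, cache it in qla_hw_data for later reads, fetch fresh data through a mailbox call, and clamp the copy to what the firmware actually returned. Condensed into a sketch (min_t() stands in for the open-coded ternary in the xgmac reader):

    /* Sketch of the lazy-DMA-buffer sysfs read pattern above. */
    if (!ha->xgmac_data) {
            ha->xgmac_data = dma_alloc_coherent(&ha->pdev->dev,
                XGMAC_DATA_SIZE, &ha->xgmac_data_dma, GFP_KERNEL);
            if (!ha->xgmac_data)
                    return 0;                       /* nothing readable */
    }

    memset(ha->xgmac_data, 0, XGMAC_DATA_SIZE);
    if (qla2x00_get_xgmac_stats(vha, ha->xgmac_data_dma,
        XGMAC_DATA_SIZE, &actual_size) != QLA_SUCCESS)
            count = 0;

    count = min_t(size_t, count, actual_size);      /* never copy past fw data */
    memcpy(buf, ha->xgmac_data, count);
    return count;

The buffer is deliberately not freed per read; teardown presumably releases it with the rest of qla_hw_data (not shown in this diff).
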
@@ -706,6 +809,8 @@ static struct sysfs_entry {
        { "reset", &sysfs_reset_attr, },
        { "edc", &sysfs_edc_attr, 2 },
        { "edc_status", &sysfs_edc_status_attr, 2 },
+       { "xgmac_stats", &sysfs_xgmac_stats_attr, 3 },
+       { "dcbx_tlv", &sysfs_dcbx_tlv_attr, 3 },
        { NULL },
 };
 
@@ -721,6 +826,8 @@ qla2x00_alloc_sysfs_attr(scsi_qla_host_t *vha)
                        continue;
                if (iter->is4GBp_only == 2 && !IS_QLA25XX(vha->hw))
                        continue;
+               if (iter->is4GBp_only == 3 && !IS_QLA81XX(vha->hw))
+                       continue;
 
                ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
                    iter->attr);
@@ -743,6 +850,8 @@ qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
                        continue;
                if (iter->is4GBp_only == 2 && !IS_QLA25XX(ha))
                        continue;
+               if (iter->is4GBp_only == 3 && !IS_QLA81XX(ha))
+                       continue;
 
                sysfs_remove_bin_file(&host->shost_gendev.kobj,
                    iter->attr);
@@ -1088,6 +1197,58 @@ qla2x00_flash_block_size_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
 }
 
+static ssize_t
+qla2x00_vlan_id_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (!IS_QLA81XX(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "\n");
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
+}
+
+static ssize_t
+qla2x00_vn_port_mac_address_show(struct device *dev,
+    struct device_attribute *attr, char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       if (!IS_QLA81XX(vha->hw))
+               return snprintf(buf, PAGE_SIZE, "\n");
+
+       return snprintf(buf, PAGE_SIZE, "%02x:%02x:%02x:%02x:%02x:%02x\n",
+           vha->fcoe_vn_port_mac[5], vha->fcoe_vn_port_mac[4],
+           vha->fcoe_vn_port_mac[3], vha->fcoe_vn_port_mac[2],
+           vha->fcoe_vn_port_mac[1], vha->fcoe_vn_port_mac[0]);
+}
+
+static ssize_t
+qla2x00_fabric_param_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+
+       return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
+}
+
+static ssize_t
+qla2x00_fw_state_show(struct device *dev, struct device_attribute *attr,
+    char *buf)
+{
+       scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
+       int rval;
+       uint16_t state[5];
+
+       rval = qla2x00_get_firmware_state(vha, state);
+       if (rval != QLA_SUCCESS)
+               memset(state, -1, sizeof(state));
+
+       return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
+           state[1], state[2], state[3], state[4]);
+}
+
 static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
 static DEVICE_ATTR(fw_version, S_IRUGO, qla2x00_fw_version_show, NULL);
 static DEVICE_ATTR(serial_num, S_IRUGO, qla2x00_serial_num_show, NULL);
@@ -1116,6 +1277,11 @@ static DEVICE_ATTR(mpi_version, S_IRUGO, qla2x00_mpi_version_show, NULL);
 static DEVICE_ATTR(phy_version, S_IRUGO, qla2x00_phy_version_show, NULL);
 static DEVICE_ATTR(flash_block_size, S_IRUGO, qla2x00_flash_block_size_show,
                   NULL);
+static DEVICE_ATTR(vlan_id, S_IRUGO, qla2x00_vlan_id_show, NULL);
+static DEVICE_ATTR(vn_port_mac_address, S_IRUGO,
+                  qla2x00_vn_port_mac_address_show, NULL);
+static DEVICE_ATTR(fabric_param, S_IRUGO, qla2x00_fabric_param_show, NULL);
+static DEVICE_ATTR(fw_state, S_IRUGO, qla2x00_fw_state_show, NULL);
 
 struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_driver_version,
@@ -1138,6 +1304,10 @@ struct device_attribute *qla2x00_host_attrs[] = {
        &dev_attr_mpi_version,
        &dev_attr_phy_version,
        &dev_attr_flash_block_size,
+       &dev_attr_vlan_id,
+       &dev_attr_vn_port_mac_address,
+       &dev_attr_fabric_param,
+       &dev_attr_fw_state,
        NULL,
 };
 
@@ -1313,7 +1483,8 @@ qla2x00_terminate_rport_io(struct fc_rport *rport)
         * At this point all fcport's software-states are cleared.  Perform any
         * final cleanup of firmware resources (PCBs and XCBs).
         */
-       if (fcport->loop_id != FC_NO_LOOP_ID)
+       if (fcport->loop_id != FC_NO_LOOP_ID &&
+           !test_bit(UNLOADING, &fcport->vha->dpc_flags))
                fcport->vha->hw->isp_ops->fabric_logout(fcport->vha,
                        fcport->loop_id, fcport->d_id.b.domain,
                        fcport->d_id.b.area, fcport->d_id.b.al_pa);
@@ -1437,11 +1608,13 @@ static int
 qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 {
        int     ret = 0;
-       int     cnt = 0;
-       uint8_t qos = QLA_DEFAULT_QUE_QOS;
+       uint8_t qos = 0;
        scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost);
        scsi_qla_host_t *vha = NULL;
        struct qla_hw_data *ha = base_vha->hw;
+       uint16_t options = 0;
+       int     cnt;
+       struct req_que *req = ha->req_q_map[0];
 
        ret = qla24xx_vport_create_req_sanity_check(fc_vport);
        if (ret) {
@@ -1497,23 +1670,39 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
 
        qla24xx_vport_disable(fc_vport, disable);
 
-       /* Create a queue pair for the vport */
-       if (ha->mqenable) {
-               if (ha->npiv_info) {
-                       for (; cnt < ha->nvram_npiv_size; cnt++) {
-                               if (ha->npiv_info[cnt].port_name ==
-                                       vha->port_name &&
-                                       ha->npiv_info[cnt].node_name ==
-                                       vha->node_name) {
-                                       qos = ha->npiv_info[cnt].q_qos;
-                                       break;
-                               }
-                       }
+       if (ql2xmultique_tag) {
+               req = ha->req_q_map[1];
+               goto vport_queue;
+       } else if (ql2xmaxqueues == 1 || !ha->npiv_info)
+               goto vport_queue;
+       /* Create a request queue in QoS mode for the vport */
+       for (cnt = 0; cnt < ha->nvram_npiv_size; cnt++) {
+               if (memcmp(ha->npiv_info[cnt].port_name, vha->port_name, 8) == 0
+                       && memcmp(ha->npiv_info[cnt].node_name, vha->node_name,
+                                       8) == 0) {
+                       qos = ha->npiv_info[cnt].q_qos;
+                       break;
+               }
+       }
+       if (qos) {
+               ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, 0,
+                       qos);
+               if (!ret)
+                       qla_printk(KERN_WARNING, ha,
+                       "Can't create request queue for vp_idx:%d\n",
+                       vha->vp_idx);
+               else {
+                       DEBUG2(qla_printk(KERN_INFO, ha,
+                       "Request Que:%d (QoS: %d) created for vp_idx:%d\n",
+                       ret, qos, vha->vp_idx));
+                       req = ha->req_q_map[ret];
                }
-               qla25xx_create_queues(vha, qos);
        }
 
+vport_queue:
+       vha->req = req;
        return 0;
+
 vport_create_failed_2:
        qla24xx_disable_vp(vha);
        qla24xx_deallocate_vp_id(vha);
@@ -1554,8 +1743,8 @@ qla24xx_vport_delete(struct fc_vport *fc_vport)
                    vha->host_no, vha->vp_idx, vha));
         }
 
-       if (ha->mqenable) {
-               if (qla25xx_delete_queues(vha, 0) != QLA_SUCCESS)
+       if (vha->req->id && !ql2xmultique_tag) {
+               if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
                        qla_printk(KERN_WARNING, ha,
                                "Queue delete failed.\n");
        }
index 34760f8d4f1768f37a0687770d5f1a4d23e5d7da..4a990f4da4ea95254fda1e6c233e202e86ad6cea 100644 (file)
@@ -149,11 +149,9 @@ qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
        int rval = QLA_SUCCESS;
        uint32_t cnt;
 
-       if (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE)
-               return rval;
-
        WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
-       for (cnt = 30000; (RD_REG_DWORD(&reg->hccr) & HCCRX_RISC_PAUSE) == 0 &&
+       for (cnt = 30000;
+           ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
            rval == QLA_SUCCESS; cnt--) {
                if (cnt)
                        udelay(100);
@@ -351,7 +349,7 @@ static inline void *
 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
 {
        uint32_t cnt, que_idx;
-       uint8_t req_cnt, rsp_cnt, que_cnt;
+       uint8_t que_cnt;
        struct qla2xxx_mq_chain *mq = ptr;
        struct device_reg_25xxmq __iomem *reg;
 
@@ -363,9 +361,8 @@ qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
        mq->type = __constant_htonl(DUMP_CHAIN_MQ);
        mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));
 
-       req_cnt = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
-       rsp_cnt = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
-       que_cnt = req_cnt > rsp_cnt ? req_cnt : rsp_cnt;
+       que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
+               ha->max_req_queues : ha->max_rsp_queues;
        mq->count = htonl(que_cnt);
        for (cnt = 0; cnt < que_cnt; cnt++) {
                reg = (struct device_reg_25xxmq *) ((void *)
index 714ee67567e1c814080342665e4d19dde48513ee..00aa48d975a698edcb73b1024c3f302f29a66a6c 100644 (file)
@@ -93,6 +93,7 @@
 #define LSD(x) ((uint32_t)((uint64_t)(x)))
 #define MSD(x) ((uint32_t)((((uint64_t)(x)) >> 16) >> 16))
 
+#define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))
 
 /*
  * I/O register
 #define REQUEST_ENTRY_CNT_24XX         2048    /* Number of request entries. */
 #define RESPONSE_ENTRY_CNT_2100                64      /* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300                512     /* Number of response entries.*/
+#define RESPONSE_ENTRY_CNT_MQ          128     /* Number of response entries.*/
 
 struct req_que;
 
@@ -186,7 +188,6 @@ struct req_que;
  * SCSI Request Block
  */
 typedef struct srb {
-       struct req_que *que;
        struct fc_port *fcport;
 
        struct scsi_cmnd *cmd;          /* Linux SCSI command pkt */
@@ -2008,7 +2009,7 @@ typedef struct vport_params {
 #define VP_RET_CODE_NOT_FOUND          6
 
 struct qla_hw_data;
-
+struct rsp_que;
 /*
  * ISP operations
  */
@@ -2030,10 +2031,9 @@ struct isp_operations {
        void (*enable_intrs) (struct qla_hw_data *);
        void (*disable_intrs) (struct qla_hw_data *);
 
-       int (*abort_command) (struct scsi_qla_host *, srb_t *,
-               struct req_que *);
-       int (*target_reset) (struct fc_port *, unsigned int);
-       int (*lun_reset) (struct fc_port *, unsigned int);
+       int (*abort_command) (srb_t *);
+       int (*target_reset) (struct fc_port *, unsigned int, int);
+       int (*lun_reset) (struct fc_port *, unsigned int, int);
        int (*fabric_login) (struct scsi_qla_host *, uint16_t, uint8_t,
                uint8_t, uint8_t, uint16_t *, uint8_t);
        int (*fabric_logout) (struct scsi_qla_host *, uint16_t, uint8_t,
@@ -2079,7 +2079,6 @@ struct isp_operations {
 #define QLA_PCI_MSIX_CONTROL   0xa2
 
 struct scsi_qla_host;
-struct rsp_que;
 
 struct qla_msix_entry {
        int have_irq;
@@ -2140,7 +2139,6 @@ struct qla_statistics {
 #define MBC_INITIALIZE_MULTIQ 0x1f
 #define QLA_QUE_PAGE 0X1000
 #define QLA_MQ_SIZE 32
-#define QLA_MAX_HOST_QUES 16
 #define QLA_MAX_QUEUES 256
 #define ISP_QUE_REG(ha, id) \
        ((ha->mqenable) ? \
@@ -2170,6 +2168,8 @@ struct rsp_que {
        struct qla_hw_data *hw;
        struct qla_msix_entry *msix;
        struct req_que *req;
+       srb_t *status_srb; /* status continuation entry */
+       struct work_struct q_work;
 };
 
 /* Request queue data structure */
@@ -2222,6 +2222,8 @@ struct qla_hw_data {
                uint32_t        fce_enabled             :1;
                uint32_t        fac_supported           :1;
                uint32_t        chip_reset_done         :1;
+               uint32_t        port0                   :1;
+               uint32_t        running_gold_fw         :1;
        } flags;
 
        /* This spinlock is used to protect "io transactions", you must
@@ -2246,7 +2248,8 @@ struct qla_hw_data {
        struct rsp_que **rsp_q_map;
        unsigned long req_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
        unsigned long rsp_qid_map[(QLA_MAX_QUEUES / 8) / sizeof(unsigned long)];
-       uint16_t        max_queues;
+       uint8_t         max_req_queues;
+       uint8_t         max_rsp_queues;
        struct qla_npiv_entry *npiv_info;
        uint16_t        nvram_npiv_size;
 
@@ -2255,6 +2258,9 @@ struct qla_hw_data {
 #define FLOGI_MID_SUPPORT       BIT_10
 #define FLOGI_VSAN_SUPPORT      BIT_12
 #define FLOGI_SP_SUPPORT        BIT_13
+
+       uint8_t         port_no;                /* Physical port of adapter */
+
        /* Timeout timers. */
        uint8_t         loop_down_abort_time;    /* port down timer */
        atomic_t        loop_down_timer;         /* loop down timer */
@@ -2392,6 +2398,14 @@ struct qla_hw_data {
        dma_addr_t      edc_data_dma;
        uint16_t        edc_data_len;
 
+#define XGMAC_DATA_SIZE        PAGE_SIZE
+       void            *xgmac_data;
+       dma_addr_t      xgmac_data_dma;
+
+#define DCBX_TLV_DATA_SIZE PAGE_SIZE
+       void            *dcbx_tlv;
+       dma_addr_t      dcbx_tlv_dma;
+
        struct task_struct      *dpc_thread;
        uint8_t dpc_active;                  /* DPC routine is active */
 
@@ -2510,6 +2524,7 @@ struct qla_hw_data {
        uint32_t        flt_region_vpd;
        uint32_t        flt_region_nvram;
        uint32_t        flt_region_npiv_conf;
+       uint32_t        flt_region_gold_fw;
 
        /* Needed for BEACON */
        uint16_t        beacon_blink_led;
@@ -2536,6 +2551,7 @@ struct qla_hw_data {
        struct qla_chip_state_84xx *cs84xx;
        struct qla_statistics qla_stats;
        struct isp_operations *isp_ops;
+       struct workqueue_struct *wq;
 };
 
 /*
@@ -2545,6 +2561,8 @@ typedef struct scsi_qla_host {
        struct list_head list;
        struct list_head vp_fcports;    /* list of fcports */
        struct list_head work_list;
+       spinlock_t work_lock;
+
        /* Commonly used flags and state information. */
        struct Scsi_Host *host;
        unsigned long   host_no;
@@ -2591,8 +2609,6 @@ typedef struct scsi_qla_host {
 #define SWITCH_FOUND           BIT_0
 #define DFLG_NO_CABLE          BIT_1
 
-       srb_t           *status_srb;    /* Status continuation entry. */
-
        /* ISP configuration data. */
        uint16_t        loop_id;                /* Host adapter loop id */
 
@@ -2618,6 +2634,11 @@ typedef struct scsi_qla_host {
        uint8_t         node_name[WWN_SIZE];
        uint8_t         port_name[WWN_SIZE];
        uint8_t         fabric_node_name[WWN_SIZE];
+
+       uint16_t        fcoe_vlan_id;
+       uint16_t        fcoe_fcf_idx;
+       uint8_t         fcoe_vn_port_mac[6];
+
        uint32_t        vp_abort_cnt;
 
        struct fc_vport *fc_vport;      /* holds fc_vport * for each vport */
@@ -2643,7 +2664,7 @@ typedef struct scsi_qla_host {
 #define VP_ERR_FAB_LOGOUT      4
 #define VP_ERR_ADAP_NORESOURCES        5
        struct qla_hw_data *hw;
-       int     req_ques[QLA_MAX_HOST_QUES];
+       struct req_que *req;
 } scsi_qla_host_t;
 
 /*
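
MAKE_HANDLE, added near the top of qla_def.h above, packs a request-queue id into the high 16 bits of the 32-bit handle and the per-queue command index into the low 16, so a completion carrying only the handle can be routed back to the right queue and slot. The inverse helpers below are illustrative (mine, not the driver's):

    #define MAKE_HANDLE(x, y) ((uint32_t)((((uint32_t)(x)) << 16) | (uint32_t)(y)))

    /* hypothetical unpack helpers, for illustration only: */
    static inline uint16_t handle_to_que(uint32_t handle)
    {
            return handle >> 16;            /* which request queue */
    }

    static inline uint16_t handle_to_idx(uint32_t handle)
    {
            return handle & 0xffff;         /* slot in outstanding_cmds[] */
    }
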
index 96ccb9642ba09e27c17e6a86ab1b3071f1e40323..dfde2dd865cbfea7008c9f104fc9c69873e24e20 100644 (file)
@@ -878,7 +878,6 @@ struct device_reg_24xx {
                                        /* HCCR statuses. */
 #define HCCRX_HOST_INT         BIT_6   /* Host to RISC interrupt bit. */
 #define HCCRX_RISC_RESET       BIT_5   /* RISC Reset mode bit. */
-#define HCCRX_RISC_PAUSE       BIT_4   /* RISC Pause mode bit. */
                                        /* HCCR commands. */
                                        /* NOOP. */
 #define HCCRX_NOOP             0x00000000
@@ -1241,6 +1240,7 @@ struct qla_flt_header {
 #define FLT_REG_HW_EVENT_1     0x1f
 #define FLT_REG_NPIV_CONF_0    0x29
 #define FLT_REG_NPIV_CONF_1    0x2a
+#define FLT_REG_GOLD_FW                0x2f
 
 struct qla_flt_region {
        uint32_t code;
@@ -1405,6 +1405,8 @@ struct access_chip_rsp_84xx {
 #define MBC_IDC_ACK            0x101
 #define MBC_RESTART_MPI_FW     0x3d
 #define MBC_FLASH_ACCESS_CTRL  0x3e    /* Control flash access. */
+#define MBC_GET_XGMAC_STATS    0x7a
+#define MBC_GET_DCBX_PARAMS    0x51
 
 /* Flash access control option field bit definitions */
 #define FAC_OPT_FORCE_SEMAPHORE                BIT_15
@@ -1711,7 +1713,7 @@ struct ex_init_cb_81xx {
 #define FA_VPD0_ADDR_81                0xD0000
 #define FA_VPD1_ADDR_81                0xD0400
 #define FA_NVRAM0_ADDR_81      0xD0080
-#define FA_NVRAM1_ADDR_81      0xD0480
+#define FA_NVRAM1_ADDR_81      0xD0180
 #define FA_FEATURE_ADDR_81     0xD4000
 #define FA_FLASH_DESCR_ADDR_81 0xD8000
 #define FA_FLASH_LAYOUT_ADDR_81        0xD8400
index 528913f6bed99ffc010eb3ec6d680660403683c7..65b12d82867c9ff6a6b128785b0b0034d16e9dd3 100644 (file)
@@ -65,8 +65,11 @@ extern int ql2xfdmienable;
 extern int ql2xallocfwdump;
 extern int ql2xextended_error_logging;
 extern int ql2xqfullrampup;
+extern int ql2xqfulltracking;
 extern int ql2xiidmaenable;
 extern int ql2xmaxqueues;
+extern int ql2xmultique_tag;
+extern int ql2xfwloadbin;
 
 extern int qla2x00_loop_reset(scsi_qla_host_t *);
 extern void qla2x00_abort_all_cmds(scsi_qla_host_t *, int);
@@ -145,7 +148,7 @@ qla2x00_dump_ram(scsi_qla_host_t *, dma_addr_t, uint32_t, uint32_t);
 extern int
 qla2x00_execute_fw(scsi_qla_host_t *, uint32_t);
 
-extern void
+extern int
 qla2x00_get_fw_version(scsi_qla_host_t *, uint16_t *, uint16_t *, uint16_t *,
     uint16_t *, uint32_t *, uint8_t *, uint32_t *, uint8_t *);
 
@@ -165,13 +168,13 @@ extern int
 qla2x00_issue_iocb(scsi_qla_host_t *, void *, dma_addr_t, size_t);
 
 extern int
-qla2x00_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
+qla2x00_abort_command(srb_t *);
 
 extern int
-qla2x00_abort_target(struct fc_port *, unsigned int);
+qla2x00_abort_target(struct fc_port *, unsigned int, int);
 
 extern int
-qla2x00_lun_reset(struct fc_port *, unsigned int);
+qla2x00_lun_reset(struct fc_port *, unsigned int, int);
 
 extern int
 qla2x00_get_adapter_id(scsi_qla_host_t *, uint16_t *, uint8_t *, uint8_t *,
@@ -236,9 +239,11 @@ extern int
 qla24xx_get_isp_stats(scsi_qla_host_t *, struct link_statistics *,
     dma_addr_t);
 
-extern int qla24xx_abort_command(scsi_qla_host_t *, srb_t *, struct req_que *);
-extern int qla24xx_abort_target(struct fc_port *, unsigned int);
-extern int qla24xx_lun_reset(struct fc_port *, unsigned int);
+extern int qla24xx_abort_command(srb_t *);
+extern int
+qla24xx_abort_target(struct fc_port *, unsigned int, int);
+extern int
+qla24xx_lun_reset(struct fc_port *, unsigned int, int);
 
 extern int
 qla2x00_system_error(scsi_qla_host_t *);
@@ -288,6 +293,18 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *, int);
 extern int
 qla81xx_fac_erase_sector(scsi_qla_host_t *, uint32_t, uint32_t);
 
+extern int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *, dma_addr_t, uint16_t, uint16_t *);
+
+extern int
+qla2x00_get_dcbx_params(scsi_qla_host_t *, dma_addr_t, uint16_t);
+
+extern int
+qla2x00_read_ram_word(scsi_qla_host_t *, uint32_t, uint32_t *);
+
+extern int
+qla2x00_write_ram_word(scsi_qla_host_t *, uint32_t, uint32_t);
+
 /*
  * Global Function Prototypes in qla_isr.c source file.
  */
@@ -295,8 +312,8 @@ extern irqreturn_t qla2100_intr_handler(int, void *);
 extern irqreturn_t qla2300_intr_handler(int, void *);
 extern irqreturn_t qla24xx_intr_handler(int, void *);
 extern void qla2x00_process_response_queue(struct rsp_que *);
-extern void qla24xx_process_response_queue(struct rsp_que *);
-
+extern void
+qla24xx_process_response_queue(struct scsi_qla_host *, struct rsp_que *);
 extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
 extern void qla2x00_free_irqs(scsi_qla_host_t *);
 
@@ -401,19 +418,21 @@ extern int qla25xx_request_irq(struct rsp_que *);
 extern int qla25xx_init_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_init_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_req_que(struct qla_hw_data *, uint16_t, uint8_t,
-       uint16_t, uint8_t, uint8_t);
+       uint16_t, int, uint8_t);
 extern int qla25xx_create_rsp_que(struct qla_hw_data *, uint16_t, uint8_t,
-       uint16_t);
+       uint16_t, int);
 extern int qla25xx_update_req_que(struct scsi_qla_host *, uint8_t, uint8_t);
 extern void qla2x00_init_response_q_entries(struct rsp_que *);
 extern int qla25xx_delete_req_que(struct scsi_qla_host *, struct req_que *);
 extern int qla25xx_delete_rsp_que(struct scsi_qla_host *, struct rsp_que *);
 extern int qla25xx_create_queues(struct scsi_qla_host *, uint8_t);
-extern int qla25xx_delete_queues(struct scsi_qla_host *, uint8_t);
+extern int qla25xx_delete_queues(struct scsi_qla_host *);
 extern uint16_t qla24xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern uint16_t qla25xx_rd_req_reg(struct qla_hw_data *, uint16_t);
 extern void qla24xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_req_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla25xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
 extern void qla24xx_wrt_rsp_reg(struct qla_hw_data *, uint16_t, uint16_t);
+extern struct scsi_qla_host *qla25xx_get_host(struct rsp_que *);
+
 #endif /* _QLA_GBL_H */
index 557f58d5bf88074174c5b3c7213d6d7eed9c96d1..917534b9f2216c50784888d514d764e950c7a572 100644 (file)
@@ -1107,7 +1107,7 @@ qla2x00_mgmt_svr_login(scsi_qla_host_t *vha)
                return ret;
 
        ha->isp_ops->fabric_login(vha, vha->mgmt_svr_loop_id, 0xff, 0xff, 0xfa,
-           mb, BIT_1);
+           mb, BIT_1|BIT_0);
        if (mb[0] != MBS_COMMAND_COMPLETE) {
                DEBUG2_13(printk("%s(%ld): Failed MANAGEMENT_SERVER login: "
                    "loop_id=%x mb[0]=%x mb[1]=%x mb[2]=%x mb[6]=%x mb[7]=%x\n",
@@ -1879,6 +1879,9 @@ qla2x00_gpsc(scsi_qla_host_t *vha, sw_info_t *list)
                        case BIT_13:
                                list[i].fp_speed = PORT_SPEED_4GB;
                                break;
+                       case BIT_12:
+                               list[i].fp_speed = PORT_SPEED_10GB;
+                               break;
                        case BIT_11:
                                list[i].fp_speed = PORT_SPEED_8GB;
                                break;
index bd7dd84c06485b4947ef4f38b66a799ff7230305..26202612932534cbd2b0bebcec847d1cb7d3ce6c 100644 (file)
@@ -634,7 +634,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
                goto chip_diag_failed;
 
        DEBUG3(printk("scsi(%ld): Reset register cleared by chip reset\n",
-           ha->host_no));
+           vha->host_no));
 
        /* Reset RISC processor. */
        WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
@@ -655,7 +655,7 @@ qla2x00_chip_diag(scsi_qla_host_t *vha)
                goto chip_diag_failed;
 
        /* Check product ID of chip */
-       DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", ha->host_no));
+       DEBUG3(printk("scsi(%ld): Checking product ID of chip\n", vha->host_no));
 
        mb[1] = RD_MAILBOX_REG(ha, reg, 1);
        mb[2] = RD_MAILBOX_REG(ha, reg, 2);
@@ -730,9 +730,6 @@ qla24xx_chip_diag(scsi_qla_host_t *vha)
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = ha->req_q_map[0];
 
-       /* Perform RISC reset. */
-       qla24xx_reset_risc(vha);
-
        ha->fw_transfer_size = REQUEST_ENTRY_SIZE * req->length;
 
        rval = qla2x00_mbx_reg_test(vha);
@@ -786,7 +783,6 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
                    sizeof(uint32_t);
                if (ha->mqenable)
                        mq_size = sizeof(struct qla2xxx_mq_chain);
-
                /* Allocate memory for Fibre Channel Event Buffer. */
                if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
                        goto try_eft;
@@ -850,8 +846,7 @@ cont_alloc:
        rsp_q_size = rsp->length * sizeof(response_t);
 
        dump_size = offsetof(struct qla2xxx_fw_dump, isp);
-       dump_size += fixed_size + mem_size + req_q_size + rsp_q_size +
-           eft_size;
+       dump_size += fixed_size + mem_size + req_q_size + rsp_q_size + eft_size;
        ha->chain_offset = dump_size;
        dump_size += mq_size + fce_size;
 
@@ -891,6 +886,56 @@ cont_alloc:
            htonl(offsetof(struct qla2xxx_fw_dump, isp));
 }
 
+static int
+qla81xx_mpi_sync(scsi_qla_host_t *vha)
+{
+#define MPS_MASK       0xe0
+       int rval;
+       uint16_t dc;
+       uint32_t dw;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (!IS_QLA81XX(vha->hw))
+               return QLA_SUCCESS;
+
+       rval = qla2x00_write_ram_word(vha, 0x7c00, 1);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2(qla_printk(KERN_WARNING, ha,
+                   "Sync-MPI: Unable to acquire semaphore.\n"));
+               goto done;
+       }
+
+       pci_read_config_word(vha->hw->pdev, 0x54, &dc);
+       rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2(qla_printk(KERN_WARNING, ha,
+                   "Sync-MPI: Unable to read sync.\n"));
+               goto done_release;
+       }
+
+       dc &= MPS_MASK;
+       if (dc == (dw & MPS_MASK))
+               goto done_release;
+
+       dw &= ~MPS_MASK;
+       dw |= dc;
+       rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2(qla_printk(KERN_WARNING, ha,
+                   "Sync-MPI: Unable to gain sync.\n"));
+       }
+
+done_release:
+       rval = qla2x00_write_ram_word(vha, 0x7c00, 0);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2(qla_printk(KERN_WARNING, ha,
+                   "Sync-MPI: Unable to release semaphore.\n"));
+       }
+
+done:
+       return rval;
+}
+
 /**
  * qla2x00_setup_chip() - Load and start RISC firmware.
  * @ha: HA context
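
qla81xx_mpi_sync() above is a semaphore-guarded read-modify-write: take a firmware semaphore word, compare the MPS bits cached in PCI config space (offset 0x54) against the copy in RISC RAM word 0x7a15, rewrite the RAM word only if they disagree, and release the semaphore on every exit path. Its control flow condenses to roughly this (the addresses and MPS_MASK come from the hunk; the wrapper is illustrative):

    static int mpi_sync_sketch(scsi_qla_host_t *vha)
    {
            struct qla_hw_data *ha = vha->hw;
            uint16_t dc;
            uint32_t dw;
            int rval;

            rval = qla2x00_write_ram_word(vha, 0x7c00, 1);  /* take semaphore */
            if (rval != QLA_SUCCESS)
                    return rval;

            pci_read_config_word(ha->pdev, 0x54, &dc);
            rval = qla2x00_read_ram_word(vha, 0x7a15, &dw);
            if (rval == QLA_SUCCESS && (dc & MPS_MASK) != (dw & MPS_MASK)) {
                    dw = (dw & ~MPS_MASK) | (dc & MPS_MASK); /* adopt PCI copy */
                    rval = qla2x00_write_ram_word(vha, 0x7a15, dw);
            }

            qla2x00_write_ram_word(vha, 0x7c00, 0);         /* drop semaphore */
            return rval;
    }
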
@@ -915,6 +960,8 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
        }
 
+       qla81xx_mpi_sync(vha);
+
        /* Load firmware sequences */
        rval = ha->isp_ops->load_risc(vha, &srisc_address);
        if (rval == QLA_SUCCESS) {
@@ -931,13 +978,16 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
                        /* Retrieve firmware information. */
                        if (rval == QLA_SUCCESS) {
                                fw_major_version = ha->fw_major_version;
-                               qla2x00_get_fw_version(vha,
+                               rval = qla2x00_get_fw_version(vha,
                                    &ha->fw_major_version,
                                    &ha->fw_minor_version,
                                    &ha->fw_subminor_version,
                                    &ha->fw_attributes, &ha->fw_memory_size,
                                    ha->mpi_version, &ha->mpi_capabilities,
                                    ha->phy_version);
+                               if (rval != QLA_SUCCESS)
+                                       goto failed;
+
                                ha->flags.npiv_supported = 0;
                                if (IS_QLA2XXX_MIDTYPE(ha) &&
                                         (ha->fw_attributes & BIT_2)) {
@@ -989,7 +1039,7 @@ qla2x00_setup_chip(scsi_qla_host_t *vha)
                            ha->fw_subminor_version);
                }
        }
-
+failed:
        if (rval) {
                DEBUG2_3(printk("scsi(%ld): Setup chip **** FAILED ****.\n",
                    vha->host_no));
@@ -1013,12 +1063,14 @@ qla2x00_init_response_q_entries(struct rsp_que *rsp)
        uint16_t cnt;
        response_t *pkt;
 
+       rsp->ring_ptr = rsp->ring;
+       rsp->ring_index    = 0;
+       rsp->status_srb = NULL;
        pkt = rsp->ring_ptr;
        for (cnt = 0; cnt < rsp->length; cnt++) {
                pkt->signature = RESPONSE_PROCESSED;
                pkt++;
        }
-
 }
 
 /**
@@ -1176,7 +1228,7 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
                if (ha->flags.msix_enabled) {
                        msix = &ha->msix_entries[1];
                        DEBUG2_17(printk(KERN_INFO
-                       "Reistering vector 0x%x for base que\n", msix->entry));
+                       "Registering vector 0x%x for base que\n", msix->entry));
                        icb->msix = cpu_to_le16(msix->entry);
                }
                /* Use alternate PCI bus number */
@@ -1230,14 +1282,14 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
        spin_lock_irqsave(&ha->hardware_lock, flags);
 
        /* Clear outstanding commands array. */
-       for (que = 0; que < ha->max_queues; que++) {
+       for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
                if (!req)
                        continue;
-               for (cnt = 0; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
                        req->outstanding_cmds[cnt] = NULL;
 
-               req->current_outstanding_cmd = 0;
+               req->current_outstanding_cmd = 1;
 
                /* Initialize firmware. */
                req->ring_ptr  = req->ring;
@@ -1245,13 +1297,10 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
                req->cnt      = req->length;
        }
 
-       for (que = 0; que < ha->max_queues; que++) {
+       for (que = 0; que < ha->max_rsp_queues; que++) {
                rsp = ha->rsp_q_map[que];
                if (!rsp)
                        continue;
-               rsp->ring_ptr = rsp->ring;
-               rsp->ring_index    = 0;
-
                /* Initialize response queue entries */
                qla2x00_init_response_q_entries(rsp);
        }
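The init_rings hunk above now clears outstanding_cmds from index 1 and starts current_outstanding_cmd at 1, so that a handle of 0 can always mean "no command" once queue ids are packed into the handle's upper word (see MAKE_HANDLE further down). A toy sketch of a slot allocator honoring that reservation; the names here are illustrative, not the driver's:

#include <stdio.h>

#define MAX_OUTSTANDING_COMMANDS 1024

/* Toy model of a request queue: slot 0 is never handed out, so a
 * zero handle can be treated as invalid everywhere.
 */
struct toy_req {
	void *outstanding_cmds[MAX_OUTSTANDING_COMMANDS];
	unsigned int current_outstanding_cmd;
};

/* Find the next free handle, wrapping past the end but skipping 0,
 * the way the start_scsi paths walk the array.  Returns 0 when full.
 */
static unsigned int toy_alloc_handle(struct toy_req *req)
{
	unsigned int index, handle = req->current_outstanding_cmd;

	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
		handle++;
		if (handle == MAX_OUTSTANDING_COMMANDS)
			handle = 1;	/* wrap around, never back to 0 */
		if (req->outstanding_cmds[handle] == NULL)
			return handle;
	}
	return 0;	/* queue full */
}

int main(void)
{
	static struct toy_req req = { .current_outstanding_cmd = 1 };

	printf("first handle: %u\n", toy_alloc_handle(&req));	/* prints 2 */
	return 0;
}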
@@ -1307,7 +1356,7 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
        unsigned long   wtime, mtime, cs84xx_time;
        uint16_t        min_wait;       /* Minimum wait time if loop is down */
        uint16_t        wait_time;      /* Wait time if loop is coming ready */
-       uint16_t        state[3];
+       uint16_t        state[5];
        struct qla_hw_data *ha = vha->hw;
 
        rval = QLA_SUCCESS;
@@ -1406,8 +1455,9 @@ qla2x00_fw_ready(scsi_qla_host_t *vha)
                    vha->host_no, state[0], jiffies));
        } while (1);
 
-       DEBUG(printk("scsi(%ld): fw_state=%x curr time=%lx.\n",
-           vha->host_no, state[0], jiffies));
+       DEBUG(printk("scsi(%ld): fw_state=%x (%x, %x, %x, %x) curr time=%lx.\n",
+           vha->host_no, state[0], state[1], state[2], state[3], state[4],
+           jiffies));
 
        if (rval) {
                DEBUG2_3(printk("scsi(%ld): Firmware ready **** FAILED ****.\n",
@@ -1541,6 +1591,7 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
        char *st, *en;
        uint16_t index;
        struct qla_hw_data *ha = vha->hw;
+       int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
 
        if (memcmp(model, BINZERO, len) != 0) {
                strncpy(ha->model_number, model, len);
@@ -1553,14 +1604,16 @@ qla2x00_set_model_info(scsi_qla_host_t *vha, uint8_t *model, size_t len,
                }
 
                index = (ha->pdev->subsystem_device & 0xff);
-               if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+               if (use_tbl &&
+                   ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
                    index < QLA_MODEL_NAMES)
                        strncpy(ha->model_desc,
                            qla2x00_model_name[index * 2 + 1],
                            sizeof(ha->model_desc) - 1);
        } else {
                index = (ha->pdev->subsystem_device & 0xff);
-               if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
+               if (use_tbl &&
+                   ha->pdev->subsystem_vendor == PCI_VENDOR_ID_QLOGIC &&
                    index < QLA_MODEL_NAMES) {
                        strcpy(ha->model_number,
                            qla2x00_model_name[index * 2]);
@@ -2061,8 +2114,10 @@ qla2x00_configure_loop(scsi_qla_host_t *vha)
        if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) {
                if (test_bit(LOCAL_LOOP_UPDATE, &save_flags))
                        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
-               if (test_bit(RSCN_UPDATE, &save_flags))
+               if (test_bit(RSCN_UPDATE, &save_flags)) {
                        set_bit(RSCN_UPDATE, &vha->dpc_flags);
+                       vha->flags.rscn_queue_overflow = 1;
+               }
        }
 
        return (rval);
@@ -2110,7 +2165,7 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
                goto cleanup_allocation;
 
        DEBUG3(printk("scsi(%ld): Entries in ID list (%d)\n",
-           ha->host_no, entries));
+           vha->host_no, entries));
        DEBUG3(qla2x00_dump_buffer((uint8_t *)ha->gid_list,
            entries * sizeof(struct gid_list_info)));
 
@@ -2243,7 +2298,8 @@ static void
 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 #define LS_UNKNOWN      2
-       static char *link_speeds[5] = { "1", "2", "?", "4", "8" };
+       static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
+       char *link_speed;
        int rval;
        uint16_t mb[6];
        struct qla_hw_data *ha = vha->hw;
@@ -2266,10 +2322,15 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
                    fcport->port_name[6], fcport->port_name[7], rval,
                    fcport->fp_speed, mb[0], mb[1]));
        } else {
+               link_speed = link_speeds[LS_UNKNOWN];
+               if (fcport->fp_speed < 5)
+                       link_speed = link_speeds[fcport->fp_speed];
+               else if (fcport->fp_speed == 0x13)
+                       link_speed = link_speeds[5];
                DEBUG2(qla_printk(KERN_INFO, ha,
                    "iIDMA adjusted to %s GB/s on "
                    "%02x%02x%02x%02x%02x%02x%02x%02x.\n",
-                   link_speeds[fcport->fp_speed], fcport->port_name[0],
+                   link_speed, fcport->port_name[0],
                    fcport->port_name[1], fcport->port_name[2],
                    fcport->port_name[3], fcport->port_name[4],
                    fcport->port_name[5], fcport->port_name[6],
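The iIDMA hunk bounds-checks fp_speed before indexing the speed table and maps the discontiguous 10 Gb/s code 0x13 to the new sixth entry. The lookup, reduced to a standalone sketch:

#include <stdio.h>
#include <stdint.h>

#define LS_UNKNOWN 2

static const char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };

/* Map a firmware port-speed code to a printable rate, falling back
 * to "?" for anything the table does not cover.
 */
static const char *fp_speed_to_str(uint16_t fp_speed)
{
	if (fp_speed < 5)
		return link_speeds[fp_speed];
	if (fp_speed == 0x13)	/* 10 Gb/s uses a discontiguous code */
		return link_speeds[5];
	return link_speeds[LS_UNKNOWN];
}

int main(void)
{
	printf("%s %s %s\n",
	    fp_speed_to_str(1),		/* "2" */
	    fp_speed_to_str(0x13),	/* "10" */
	    fp_speed_to_str(9));	/* "?" */
	return 0;
}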
@@ -3180,9 +3241,14 @@ qla2x00_loop_resync(scsi_qla_host_t *vha)
 {
        int rval = QLA_SUCCESS;
        uint32_t wait_time;
-       struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = ha->req_q_map[vha->req_ques[0]];
-       struct rsp_que *rsp = req->rsp;
+       struct req_que *req;
+       struct rsp_que *rsp;
+
+       if (ql2xmultique_tag)
+               req = vha->hw->req_q_map[0];
+       else
+               req = vha->req;
+       rsp = req->rsp;
 
        atomic_set(&vha->loop_state, LOOP_UPDATE);
        clear_bit(ISP_ABORT_RETRY, &vha->dpc_flags);
@@ -3448,7 +3514,7 @@ qla25xx_init_queues(struct qla_hw_data *ha)
        int ret = -1;
        int i;
 
-       for (i = 1; i < ha->max_queues; i++) {
+       for (i = 1; i < ha->max_rsp_queues; i++) {
                rsp = ha->rsp_q_map[i];
                if (rsp) {
                        rsp->options &= ~BIT_0;
@@ -3462,6 +3528,8 @@ qla25xx_init_queues(struct qla_hw_data *ha)
                                        "%s Rsp que:%d inited\n", __func__,
                                                rsp->id));
                }
+       }
+       for (i = 1; i < ha->max_req_queues; i++) {
                req = ha->req_q_map[i];
                if (req) {
                /* Clear outstanding commands array. */
@@ -3566,14 +3634,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        nv = ha->nvram;
 
        /* Determine NVRAM starting address. */
-       ha->nvram_size = sizeof(struct nvram_24xx);
-       ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
-       ha->vpd_size = FA_NVRAM_VPD_SIZE;
-       ha->vpd_base = FA_NVRAM_VPD0_ADDR;
-       if (PCI_FUNC(ha->pdev->devfn)) {
+       if (ha->flags.port0) {
+               ha->nvram_base = FA_NVRAM_FUNC0_ADDR;
+               ha->vpd_base = FA_NVRAM_VPD0_ADDR;
+       } else {
                ha->nvram_base = FA_NVRAM_FUNC1_ADDR;
                ha->vpd_base = FA_NVRAM_VPD1_ADDR;
        }
+       ha->nvram_size = sizeof(struct nvram_24xx);
+       ha->vpd_size = FA_NVRAM_VPD_SIZE;
 
        /* Get VPD data into cache */
        ha->vpd = ha->nvram + VPD_OFFSET;
@@ -3587,7 +3656,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
                chksum += le32_to_cpu(*dptr++);
 
-       DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
+       DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
        DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
 
        /* Bad NVRAM data, set default parameters. */
@@ -3612,7 +3681,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
                nv->exchange_count = __constant_cpu_to_le16(0);
                nv->hard_address = __constant_cpu_to_le16(124);
                nv->port_name[0] = 0x21;
-               nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
+               nv->port_name[1] = 0x00 + ha->port_no;
                nv->port_name[2] = 0x00;
                nv->port_name[3] = 0xe0;
                nv->port_name[4] = 0x8b;
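The NVRAM path above sums the image as ha->nvram_size >> 2 little-endian 32-bit words; together with signature checks not shown in this hunk, a nonzero sum marks the data bad and triggers the defaults that follow. A sketch of that wrap-to-zero checksum, with memcpy standing in for le32_to_cpu() on a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Sum an NVRAM image as 32-bit words; a valid image stores a checksum
 * word chosen so that the total wraps to zero.
 */
static uint32_t nvram_checksum(const void *nvram, size_t size_bytes)
{
	uint32_t sum = 0, word;
	const uint8_t *p = nvram;
	size_t i;

	for (i = 0; i < size_bytes >> 2; i++) {
		memcpy(&word, p + 4 * i, 4);	/* le32_to_cpu() on LE hosts */
		sum += word;
	}
	return sum;	/* 0 means the image checks out */
}

int main(void)
{
	uint32_t image[4] = { 0x12345678, 0x9abcdef0, 1, 0 };

	image[3] = -(image[0] + image[1] + image[2]);	/* stored checksum */
	printf("sum=%u\n", nvram_checksum(image, sizeof(image)));
	return 0;
}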
@@ -3798,11 +3867,11 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
 }
 
 static int
-qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
+qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr,
+    uint32_t faddr)
 {
        int     rval = QLA_SUCCESS;
        int     segments, fragment;
-       uint32_t faddr;
        uint32_t *dcode, dlen;
        uint32_t risc_addr;
        uint32_t risc_size;
@@ -3811,12 +3880,11 @@ qla24xx_load_risc_flash(scsi_qla_host_t *vha, uint32_t *srisc_addr)
        struct req_que *req = ha->req_q_map[0];
 
        qla_printk(KERN_INFO, ha,
-           "FW: Loading from flash (%x)...\n", ha->flt_region_fw);
+           "FW: Loading from flash (%x)...\n", faddr);
 
        rval = QLA_SUCCESS;
 
        segments = FA_RISC_CODE_SEGMENTS;
-       faddr = ha->flt_region_fw;
        dcode = (uint32_t *)req->ring;
        *srisc_addr = 0;
 
@@ -4104,6 +4172,9 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
        int rval;
 
+       if (ql2xfwloadbin == 1)
+               return qla81xx_load_risc(vha, srisc_addr);
+
        /*
         * FW Load priority:
         * 1) Firmware via request-firmware interface (.bin file).
@@ -4113,24 +4184,45 @@ qla24xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
        if (rval == QLA_SUCCESS)
                return rval;
 
-       return qla24xx_load_risc_flash(vha, srisc_addr);
+       return qla24xx_load_risc_flash(vha, srisc_addr,
+           vha->hw->flt_region_fw);
 }
 
 int
 qla81xx_load_risc(scsi_qla_host_t *vha, uint32_t *srisc_addr)
 {
        int rval;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ql2xfwloadbin == 2)
+               goto try_blob_fw;
 
        /*
         * FW Load priority:
         * 1) Firmware residing in flash.
         * 2) Firmware via request-firmware interface (.bin file).
+        * 3) Golden-Firmware residing in flash -- limited operation.
         */
-       rval = qla24xx_load_risc_flash(vha, srisc_addr);
+       rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_fw);
        if (rval == QLA_SUCCESS)
                return rval;
 
-       return qla24xx_load_risc_blob(vha, srisc_addr);
+try_blob_fw:
+       rval = qla24xx_load_risc_blob(vha, srisc_addr);
+       if (rval == QLA_SUCCESS || !ha->flt_region_gold_fw)
+               return rval;
+
+       qla_printk(KERN_ERR, ha,
+           "FW: Attempting to fallback to golden firmware...\n");
+       rval = qla24xx_load_risc_flash(vha, srisc_addr, ha->flt_region_gold_fw);
+       if (rval != QLA_SUCCESS)
+               return rval;
+
+       qla_printk(KERN_ERR, ha,
+           "FW: Please update operational firmware...\n");
+       ha->flags.running_gold_fw = 1;
+
+       return rval;
 }
 
 void
@@ -4146,7 +4238,7 @@ qla2x00_try_to_stop_firmware(scsi_qla_host_t *vha)
 
        ret = qla2x00_stop_firmware(vha);
        for (retries = 5; ret != QLA_SUCCESS && ret != QLA_FUNCTION_TIMEOUT &&
-           retries ; retries--) {
+           ret != QLA_INVALID_COMMAND && retries ; retries--) {
                ha->isp_ops->reset_chip(vha);
                if (ha->isp_ops->chip_diag(vha) != QLA_SUCCESS)
                        continue;
@@ -4165,13 +4257,19 @@ qla24xx_configure_vhba(scsi_qla_host_t *vha)
        uint16_t mb[MAILBOX_REGISTER_COUNT];
        struct qla_hw_data *ha = vha->hw;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
-       struct req_que *req = ha->req_q_map[vha->req_ques[0]];
-       struct rsp_que *rsp = req->rsp;
+       struct req_que *req;
+       struct rsp_que *rsp;
 
        if (!vha->vp_idx)
                return -EINVAL;
 
        rval = qla2x00_fw_ready(base_vha);
+       if (ql2xmultique_tag)
+               req = ha->req_q_map[0];
+       else
+               req = vha->req;
+       rsp = req->rsp;
+
        if (rval == QLA_SUCCESS) {
                clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
                qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
@@ -4305,7 +4403,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
        for (cnt = 0, chksum = 0; cnt < ha->nvram_size >> 2; cnt++)
                chksum += le32_to_cpu(*dptr++);
 
-       DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", ha->host_no));
+       DEBUG5(printk("scsi(%ld): Contents of NVRAM\n", vha->host_no));
        DEBUG5(qla2x00_dump_buffer((uint8_t *)nv, ha->nvram_size));
 
        /* Bad NVRAM data, set default parameters. */
@@ -4329,7 +4427,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
                nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
                nv->exchange_count = __constant_cpu_to_le16(0);
                nv->port_name[0] = 0x21;
-               nv->port_name[1] = 0x00 + PCI_FUNC(ha->pdev->devfn);
+               nv->port_name[1] = 0x00 + ha->port_no;
                nv->port_name[2] = 0x00;
                nv->port_name[3] = 0xe0;
                nv->port_name[4] = 0x8b;
@@ -4358,12 +4456,12 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
                nv->max_luns_per_target = __constant_cpu_to_le16(128);
                nv->port_down_retry_count = __constant_cpu_to_le16(30);
                nv->link_down_timeout = __constant_cpu_to_le16(30);
-               nv->enode_mac[0] = 0x01;
+               nv->enode_mac[0] = 0x00;
                nv->enode_mac[1] = 0x02;
                nv->enode_mac[2] = 0x03;
                nv->enode_mac[3] = 0x04;
                nv->enode_mac[4] = 0x05;
-               nv->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
+               nv->enode_mac[5] = 0x06 + ha->port_no;
 
                rval = 1;
        }
@@ -4396,7 +4494,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
                icb->enode_mac[2] = 0x03;
                icb->enode_mac[3] = 0x04;
                icb->enode_mac[4] = 0x05;
-               icb->enode_mac[5] = 0x06 + PCI_FUNC(ha->pdev->devfn);
+               icb->enode_mac[5] = 0x06 + ha->port_no;
        }
 
        /* Use extended-initialization control block. */
index a8abbb95730df7d39eda9721471f04f4af1dd8db..13396beae2cedcde63b00c6156b44696168e6cc0 100644 (file)
@@ -15,6 +15,7 @@ static request_t *qla2x00_req_pkt(struct scsi_qla_host *, struct req_que *,
                                                        struct rsp_que *rsp);
 static void qla2x00_isp_cmd(struct scsi_qla_host *, struct req_que *);
 
+static void qla25xx_set_que(srb_t *, struct rsp_que **);
 /**
  * qla2x00_get_cmd_direction() - Determine control_flag data direction.
  * @cmd: SCSI command
@@ -92,9 +93,10 @@ qla2x00_calc_iocbs_64(uint16_t dsds)
  * Returns a pointer to the Continuation Type 0 IOCB packet.
  */
 static inline cont_entry_t *
-qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
+qla2x00_prep_cont_type0_iocb(struct scsi_qla_host *vha)
 {
        cont_entry_t *cont_pkt;
+       struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
@@ -120,10 +122,11 @@ qla2x00_prep_cont_type0_iocb(struct req_que *req, struct scsi_qla_host *vha)
  * Returns a pointer to the continuation type 1 IOCB packet.
  */
 static inline cont_a64_entry_t *
-qla2x00_prep_cont_type1_iocb(struct req_que *req, scsi_qla_host_t *vha)
+qla2x00_prep_cont_type1_iocb(scsi_qla_host_t *vha)
 {
        cont_a64_entry_t *cont_pkt;
 
+       struct req_que *req = vha->req;
        /* Adjust ring index. */
        req->ring_index++;
        if (req->ring_index == req->length) {
@@ -159,7 +162,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
-       struct req_que *req;
 
        cmd = sp->cmd;
 
@@ -174,8 +176,6 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
        }
 
        vha = sp->fcport->vha;
-       req = sp->que;
-
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
        /* Three DSDs are available in the Command Type 2 IOCB */
@@ -192,7 +192,7 @@ void qla2x00_build_scsi_iocbs_32(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Seven DSDs are available in the Continuation
                         * Type 0 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type0_iocb(req, vha);
+                       cont_pkt = qla2x00_prep_cont_type0_iocb(vha);
                        cur_dsd = (uint32_t *)&cont_pkt->dseg_0_address;
                        avail_dsds = 7;
                }
@@ -220,7 +220,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
        struct scsi_cmnd *cmd;
        struct scatterlist *sg;
        int i;
-       struct req_que *req;
 
        cmd = sp->cmd;
 
@@ -235,8 +234,6 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
        }
 
        vha = sp->fcport->vha;
-       req = sp->que;
-
        cmd_pkt->control_flags |= cpu_to_le16(qla2x00_get_cmd_direction(sp));
 
        /* Two DSDs are available in the Command Type 3 IOCB */
@@ -254,7 +251,7 @@ void qla2x00_build_scsi_iocbs_64(srb_t *sp, cmd_entry_t *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -353,7 +350,6 @@ qla2x00_start_scsi(srb_t *sp)
        /* Build command packet */
        req->current_outstanding_cmd = handle;
        req->outstanding_cmds[handle] = sp;
-       sp->que = req;
        sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
        req->cnt -= req_cnt;
 
@@ -453,6 +449,7 @@ __qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
                        mrk24->lun[2] = MSB(lun);
                        host_to_fcp_swap(mrk24->lun, sizeof(mrk24->lun));
                        mrk24->vp_index = vha->vp_idx;
+                       mrk24->handle = MAKE_HANDLE(req->id, mrk24->handle);
                } else {
                        SET_TARGET_ID(ha, mrk->target, loop_id);
                        mrk->lun = cpu_to_le16(lun);
@@ -531,9 +528,6 @@ qla2x00_req_pkt(struct scsi_qla_host *vha, struct req_que *req,
                        for (cnt = 0; cnt < REQUEST_ENTRY_SIZE / 4; cnt++)
                                *dword_ptr++ = 0;
 
-                       /* Set system defined field. */
-                       pkt->sys_define = (uint8_t)req->ring_index;
-
                        /* Set entry count. */
                        pkt->entry_count = 1;
 
@@ -656,7 +650,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
        }
 
        vha = sp->fcport->vha;
-       req = sp->que;
+       req = vha->req;
 
        /* Set transfer direction */
        if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -687,7 +681,7 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
                         * Five DSDs are available in the Continuation
                         * Type 1 IOCB.
                         */
-                       cont_pkt = qla2x00_prep_cont_type1_iocb(req, vha);
+                       cont_pkt = qla2x00_prep_cont_type1_iocb(vha);
                        cur_dsd = (uint32_t *)cont_pkt->dseg_0_address;
                        avail_dsds = 5;
                }
@@ -724,19 +718,13 @@ qla24xx_start_scsi(srb_t *sp)
        struct scsi_cmnd *cmd = sp->cmd;
        struct scsi_qla_host *vha = sp->fcport->vha;
        struct qla_hw_data *ha = vha->hw;
-       uint16_t que_id;
 
        /* Setup device pointers. */
        ret = 0;
-       que_id = vha->req_ques[0];
 
-       req = ha->req_q_map[que_id];
-       sp->que = req;
+       qla25xx_set_que(sp, &rsp);
+       req = vha->req;
 
-       if (req->rsp)
-               rsp = req->rsp;
-       else
-               rsp = ha->rsp_q_map[que_id];
        /* So we know we haven't pci_map'ed anything yet */
        tot_dsds = 0;
 
@@ -794,7 +782,7 @@ qla24xx_start_scsi(srb_t *sp)
        req->cnt -= req_cnt;
 
        cmd_pkt = (struct cmd_type_7 *)req->ring_ptr;
-       cmd_pkt->handle = handle;
+       cmd_pkt->handle = MAKE_HANDLE(req->id, handle);
 
        /* Zero out remaining portion of packet. */
        /*    tagged queuing modifier -- default is TSK_SIMPLE (0). */
@@ -823,6 +811,8 @@ qla24xx_start_scsi(srb_t *sp)
 
        /* Set total data segment count. */
        cmd_pkt->entry_count = (uint8_t)req_cnt;
+       /* Specify response queue number where completion should happen */
+       cmd_pkt->entry_status = (uint8_t) rsp->id;
        wmb();
 
        /* Adjust ring index. */
@@ -842,7 +832,7 @@ qla24xx_start_scsi(srb_t *sp)
        /* Manage unprocessed RIO/ZIO commands in response queue. */
        if (vha->flags.process_response_queue &&
                rsp->ring_ptr->signature != RESPONSE_PROCESSED)
-               qla24xx_process_response_queue(rsp);
+               qla24xx_process_response_queue(vha, rsp);
 
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
        return QLA_SUCCESS;
@@ -855,3 +845,16 @@ queuing_error:
 
        return QLA_FUNCTION_FAILED;
 }
+
+static void qla25xx_set_que(srb_t *sp, struct rsp_que **rsp)
+{
+       struct scsi_cmnd *cmd = sp->cmd;
+       struct qla_hw_data *ha = sp->fcport->vha->hw;
+       int affinity = cmd->request->cpu;
+
+       if (ql2xmultique_tag && affinity >= 0 &&
+               affinity < ha->max_rsp_queues - 1)
+               *rsp = ha->rsp_q_map[affinity + 1];
+       else
+               *rsp = ha->rsp_q_map[0];
+}
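qla25xx_set_que() above maps the block layer's completion-CPU hint onto response queues 1..max_rsp_queues-1 and falls back to base queue 0 for anything out of range. The same arithmetic as a standalone function:

#include <stdio.h>

/* Pick a response-queue index from the completion-CPU hint: CPUs map
 * onto queues 1..max_rsp_queues-1, and anything out of range
 * (including the "no hint" value -1) lands on base queue 0.
 */
static int pick_rsp_queue(int affinity, int max_rsp_queues, int multique)
{
	if (multique && affinity >= 0 && affinity < max_rsp_queues - 1)
		return affinity + 1;
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	    pick_rsp_queue(0, 4, 1),	/* CPU 0 -> queue 1 */
	    pick_rsp_queue(-1, 4, 1),	/* no hint -> base queue */
	    pick_rsp_queue(7, 4, 1));	/* out of range -> base queue */
	return 0;
}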
index d04981848e561aed9e9c7853619f78c532d84174..c8d0a176fea4a1287853716a7dd1d6f20c910060 100644 (file)
@@ -13,10 +13,9 @@ static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
 static void qla2x00_process_completed_request(struct scsi_qla_host *,
        struct req_que *, uint32_t);
 static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
-static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
+static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
 static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
        sts_entry_t *);
-static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
 
 /**
  * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
@@ -51,7 +50,7 @@ qla2100_intr_handler(int irq, void *dev_id)
        status = 0;
 
        spin_lock(&ha->hardware_lock);
-       vha = qla2x00_get_rsp_host(rsp);
+       vha = pci_get_drvdata(ha->pdev);
        for (iter = 50; iter--; ) {
                hccr = RD_REG_WORD(&reg->hccr);
                if (hccr & HCCR_RISC_PAUSE) {
@@ -147,7 +146,7 @@ qla2300_intr_handler(int irq, void *dev_id)
        status = 0;
 
        spin_lock(&ha->hardware_lock);
-       vha = qla2x00_get_rsp_host(rsp);
+       vha = pci_get_drvdata(ha->pdev);
        for (iter = 50; iter--; ) {
                stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
                if (stat & HSR_RISC_PAUSED) {
@@ -685,7 +684,7 @@ skip_rio:
                    vha->host_no));
 
                if (IS_FWI2_CAPABLE(ha))
-                       qla24xx_process_response_queue(rsp);
+                       qla24xx_process_response_queue(vha, rsp);
                else
                        qla2x00_process_response_queue(rsp);
                break;
@@ -766,7 +765,10 @@ qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
        struct qla_hw_data *ha = vha->hw;
        struct req_que *req = NULL;
 
-       req = ha->req_q_map[vha->req_ques[0]];
+       if (!ql2xqfulltracking)
+               return;
+
+       req = vha->req;
        if (!req)
                return;
        if (req->max_q_depth <= sdev->queue_depth)
@@ -808,6 +810,9 @@ qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
        fc_port_t *fcport;
        struct scsi_device *sdev;
 
+       if (!ql2xqfulltracking)
+               return;
+
        sdev = sp->cmd->device;
        if (sdev->queue_depth >= req->max_q_depth)
                return;
@@ -858,8 +863,8 @@ qla2x00_process_completed_request(struct scsi_qla_host *vha,
                qla2x00_ramp_up_queue_depth(vha, req, sp);
                qla2x00_sp_compl(ha, sp);
        } else {
-               DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
-                   vha->host_no));
+               DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
+                       " handle(%d)\n", vha->host_no, req->id, index));
                qla_printk(KERN_WARNING, ha,
                    "Invalid ISP SCSI completion handle\n");
 
@@ -881,7 +886,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
        uint16_t        handle_cnt;
        uint16_t        cnt;
 
-       vha = qla2x00_get_rsp_host(rsp);
+       vha = pci_get_drvdata(ha->pdev);
 
        if (!vha->flags.online)
                return;
@@ -926,7 +931,7 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
                        }
                        break;
                case STATUS_CONT_TYPE:
-                       qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
+                       qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
                        break;
                default:
                        /* Type Not Supported. */
@@ -945,7 +950,8 @@ qla2x00_process_response_queue(struct rsp_que *rsp)
 }
 
 static inline void
-qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
+qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
+       struct rsp_que *rsp)
 {
        struct scsi_cmnd *cp = sp->cmd;
 
@@ -962,7 +968,7 @@ qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
        sp->request_sense_ptr += sense_len;
        sp->request_sense_length -= sense_len;
        if (sp->request_sense_length != 0)
-               sp->fcport->vha->status_srb = sp;
+               rsp->status_srb = sp;
 
        DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
            "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
@@ -992,7 +998,9 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        uint32_t        sense_len, rsp_info_len, resid_len, fw_resid_len;
        uint8_t         *rsp_info, *sense_data;
        struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = rsp->req;
+       uint32_t handle;
+       uint16_t que;
+       struct req_que *req;
 
        sts = (sts_entry_t *) pkt;
        sts24 = (struct sts_entry_24xx *) pkt;
@@ -1003,18 +1011,20 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                comp_status = le16_to_cpu(sts->comp_status);
                scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
        }
-
+       handle = (uint32_t) LSW(sts->handle);
+       que = MSW(sts->handle);
+       req = ha->req_q_map[que];
        /* Fast path completion. */
        if (comp_status == CS_COMPLETE && scsi_status == 0) {
-               qla2x00_process_completed_request(vha, req, sts->handle);
+               qla2x00_process_completed_request(vha, req, handle);
 
                return;
        }
 
        /* Validate handle. */
-       if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
-               sp = req->outstanding_cmds[sts->handle];
-               req->outstanding_cmds[sts->handle] = NULL;
+       if (handle < MAX_OUTSTANDING_COMMANDS) {
+               sp = req->outstanding_cmds[handle];
+               req->outstanding_cmds[handle] = NULL;
        } else
                sp = NULL;
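This decode is the flip side of the submission path's cmd_pkt->handle = MAKE_HANDLE(req->id, handle): the upper 16 bits select the request queue, the lower 16 bits the outstanding-command slot. A sketch of the round trip, assuming MAKE_HANDLE is a plain shift-and-or pack:

#include <assert.h>
#include <stdint.h>

/* These mirror the driver's word-access macros; MAKE_HANDLE is
 * assumed here to pack a 16-bit queue id above a 16-bit slot index.
 */
#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define MAKE_HANDLE(q, h)	((uint32_t)(((uint32_t)(q) << 16) | (uint16_t)(h)))

int main(void)
{
	uint32_t handle = MAKE_HANDLE(3, 42);	/* queue 3, slot 42 */

	/* Completion side: recover the request queue and the slot. */
	assert(MSW(handle) == 3);	/* -> ha->req_q_map[3] */
	assert(LSW(handle) == 42);	/* -> req->outstanding_cmds[42] */
	return 0;
}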
 
@@ -1030,7 +1040,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        cp = sp->cmd;
        if (cp == NULL) {
                DEBUG2(printk("scsi(%ld): Command already returned back to OS "
-                   "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
+                   "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
                qla_printk(KERN_WARNING, ha,
                    "Command is NULL: already returned to OS (sp=%p)\n", sp);
 
@@ -1121,6 +1131,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                            scsi_status));
 
                        /* Adjust queue depth for all luns on the port. */
+                       if (!ql2xqfulltracking)
+                               break;
                        fcport->last_queue_full = jiffies;
                        starget_for_each_device(cp->device->sdev_target,
                            fcport, qla2x00_adjust_sdev_qdepth_down);
@@ -1133,7 +1145,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                if (!(scsi_status & SS_SENSE_LEN_VALID))
                        break;
 
-               qla2x00_handle_sense(sp, sense_data, sense_len);
+               qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
                break;
 
        case CS_DATA_UNDERRUN:
@@ -1179,6 +1191,8 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                 * Adjust queue depth for all luns on the
                                 * port.
                                 */
+                               if (!ql2xqfulltracking)
+                                       break;
                                fcport->last_queue_full = jiffies;
                                starget_for_each_device(
                                    cp->device->sdev_target, fcport,
@@ -1192,12 +1206,12 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                        if (!(scsi_status & SS_SENSE_LEN_VALID))
                                break;
 
-                       qla2x00_handle_sense(sp, sense_data, sense_len);
+                       qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
                } else {
                        /*
                         * If RISC reports underrun and target does not report
                         * it then we must have a lost frame, so tell upper
-                        * layer to retry it by reporting a bus busy.
+                        * layer to retry it by reporting an error.
                         */
                        if (!(scsi_status & SS_RESIDUAL_UNDER)) {
                                DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
@@ -1207,7 +1221,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
                                        cp->device->id, cp->device->lun, resid,
                                        scsi_bufflen(cp)));
 
-                               cp->result = DID_BUS_BUSY << 16;
+                               cp->result = DID_ERROR << 16;
                                break;
                        }
 
@@ -1334,7 +1348,7 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
        }
 
        /* Place command on done queue. */
-       if (vha->status_srb == NULL)
+       if (rsp->status_srb == NULL)
                qla2x00_sp_compl(ha, sp);
 }
 
@@ -1346,11 +1360,11 @@ qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
  * Extended sense data.
  */
 static void
-qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
+qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
 {
        uint8_t         sense_sz = 0;
-       struct qla_hw_data *ha = vha->hw;
-       srb_t           *sp = vha->status_srb;
+       struct qla_hw_data *ha = rsp->hw;
+       srb_t           *sp = rsp->status_srb;
        struct scsi_cmnd *cp;
 
        if (sp != NULL && sp->request_sense_length != 0) {
@@ -1362,7 +1376,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
                            "cmd is NULL: already returned to OS (sp=%p)\n",
                            sp);
 
-                       vha->status_srb = NULL;
+                       rsp->status_srb = NULL;
                        return;
                }
 
@@ -1383,7 +1397,7 @@ qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
 
                /* Place command on done queue. */
                if (sp->request_sense_length == 0) {
-                       vha->status_srb = NULL;
+                       rsp->status_srb = NULL;
                        qla2x00_sp_compl(ha, sp);
                }
        }
@@ -1399,7 +1413,9 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 {
        srb_t *sp;
        struct qla_hw_data *ha = vha->hw;
-       struct req_que *req = rsp->req;
+       uint32_t handle = LSW(pkt->handle);
+       uint16_t que = MSW(pkt->handle);
+       struct req_que *req = ha->req_q_map[que];
 #if defined(QL_DEBUG_LEVEL_2)
        if (pkt->entry_status & RF_INV_E_ORDER)
                qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
@@ -1417,14 +1433,14 @@ qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
 #endif
 
        /* Validate handle. */
-       if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
-               sp = req->outstanding_cmds[pkt->handle];
+       if (handle < MAX_OUTSTANDING_COMMANDS)
+               sp = req->outstanding_cmds[handle];
        else
                sp = NULL;
 
        if (sp) {
                /* Free outstanding command slot. */
-               req->outstanding_cmds[pkt->handle] = NULL;
+               req->outstanding_cmds[handle] = NULL;
 
                /* Bad payload or header */
                if (pkt->entry_status &
@@ -1486,13 +1502,10 @@ qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
  * qla24xx_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
  */
-void
-qla24xx_process_response_queue(struct rsp_que *rsp)
+void qla24xx_process_response_queue(struct scsi_qla_host *vha,
+       struct rsp_que *rsp)
 {
        struct sts_entry_24xx *pkt;
-       struct scsi_qla_host *vha;
-
-       vha = qla2x00_get_rsp_host(rsp);
 
        if (!vha->flags.online)
                return;
@@ -1523,7 +1536,7 @@ qla24xx_process_response_queue(struct rsp_que *rsp)
                        qla2x00_status_entry(vha, rsp, pkt);
                        break;
                case STATUS_CONT_TYPE:
-                       qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
+                       qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
                        break;
                case VP_RPT_ID_IOCB_TYPE:
                        qla24xx_report_id_acquisition(vha,
@@ -1626,7 +1639,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
        status = 0;
 
        spin_lock(&ha->hardware_lock);
-       vha = qla2x00_get_rsp_host(rsp);
+       vha = pci_get_drvdata(ha->pdev);
        for (iter = 50; iter--; ) {
                stat = RD_REG_DWORD(&reg->host_status);
                if (stat & HSRX_RISC_PAUSED) {
@@ -1664,7 +1677,7 @@ qla24xx_intr_handler(int irq, void *dev_id)
                        break;
                case 0x13:
                case 0x14:
-                       qla24xx_process_response_queue(rsp);
+                       qla24xx_process_response_queue(vha, rsp);
                        break;
                default:
                        DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1692,6 +1705,7 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
        struct qla_hw_data *ha;
        struct rsp_que *rsp;
        struct device_reg_24xx __iomem *reg;
+       struct scsi_qla_host *vha;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -1704,7 +1718,8 @@ qla24xx_msix_rsp_q(int irq, void *dev_id)
 
        spin_lock_irq(&ha->hardware_lock);
 
-       qla24xx_process_response_queue(rsp);
+       vha = qla25xx_get_host(rsp);
+       qla24xx_process_response_queue(vha, rsp);
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
 
        spin_unlock_irq(&ha->hardware_lock);
@@ -1717,7 +1732,6 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
 {
        struct qla_hw_data *ha;
        struct rsp_que *rsp;
-       struct device_reg_24xx __iomem *reg;
 
        rsp = (struct rsp_que *) dev_id;
        if (!rsp) {
@@ -1726,13 +1740,8 @@ qla25xx_msix_rsp_q(int irq, void *dev_id)
                return IRQ_NONE;
        }
        ha = rsp->hw;
-       reg = &ha->iobase->isp24;
 
-       spin_lock_irq(&ha->hardware_lock);
-
-       qla24xx_process_response_queue(rsp);
-
-       spin_unlock_irq(&ha->hardware_lock);
+       queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
        return IRQ_HANDLED;
 }
@@ -1760,7 +1769,7 @@ qla24xx_msix_default(int irq, void *dev_id)
        status = 0;
 
        spin_lock_irq(&ha->hardware_lock);
-       vha = qla2x00_get_rsp_host(rsp);
+       vha = pci_get_drvdata(ha->pdev);
        do {
                stat = RD_REG_DWORD(&reg->host_status);
                if (stat & HSRX_RISC_PAUSED) {
@@ -1798,7 +1807,7 @@ qla24xx_msix_default(int irq, void *dev_id)
                        break;
                case 0x13:
                case 0x14:
-                       qla24xx_process_response_queue(rsp);
+                       qla24xx_process_response_queue(vha, rsp);
                        break;
                default:
                        DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
@@ -1822,31 +1831,14 @@ qla24xx_msix_default(int irq, void *dev_id)
 /* Interrupt handling helpers. */
 
 struct qla_init_msix_entry {
-       uint16_t entry;
-       uint16_t index;
        const char *name;
        irq_handler_t handler;
 };
 
-static struct qla_init_msix_entry base_queue = {
-       .entry = 0,
-       .index = 0,
-       .name = "qla2xxx (default)",
-       .handler = qla24xx_msix_default,
-};
-
-static struct qla_init_msix_entry base_rsp_queue = {
-       .entry = 1,
-       .index = 1,
-       .name = "qla2xxx (rsp_q)",
-       .handler = qla24xx_msix_rsp_q,
-};
-
-static struct qla_init_msix_entry multi_rsp_queue = {
-       .entry = 1,
-       .index = 1,
-       .name = "qla2xxx (multi_q)",
-       .handler = qla25xx_msix_rsp_q,
+static struct qla_init_msix_entry msix_entries[3] = {
+       { "qla2xxx (default)", qla24xx_msix_default },
+       { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
+       { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
 };
 
 static void
@@ -1873,7 +1865,6 @@ qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
        int i, ret;
        struct msix_entry *entries;
        struct qla_msix_entry *qentry;
-       struct qla_init_msix_entry *msix_queue;
 
        entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
                                        GFP_KERNEL);
@@ -1900,7 +1891,7 @@ msix_failed:
                                ha->msix_count, ret);
                        goto msix_out;
                }
-               ha->max_queues = ha->msix_count - 1;
+               ha->max_rsp_queues = ha->msix_count - 1;
        }
        ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
                                ha->msix_count, GFP_KERNEL);
@@ -1918,45 +1909,27 @@ msix_failed:
                qentry->rsp = NULL;
        }
 
-       /* Enable MSI-X for AENs for queue 0 */
-       qentry = &ha->msix_entries[0];
-       ret = request_irq(qentry->vector, base_queue.handler, 0,
-                                       base_queue.name, rsp);
-       if (ret) {
-               qla_printk(KERN_WARNING, ha,
+       /* Enable MSI-X vectors for the base queue */
+       for (i = 0; i < 2; i++) {
+               qentry = &ha->msix_entries[i];
+               ret = request_irq(qentry->vector, msix_entries[i].handler,
+                                       0, msix_entries[i].name, rsp);
+               if (ret) {
+                       qla_printk(KERN_WARNING, ha,
                        "MSI-X: Unable to register handler -- %x/%d.\n",
                        qentry->vector, ret);
-               qla24xx_disable_msix(ha);
-               goto msix_out;
+                       qla24xx_disable_msix(ha);
+                       ha->mqenable = 0;
+                       goto msix_out;
+               }
+               qentry->have_irq = 1;
+               qentry->rsp = rsp;
+               rsp->msix = qentry;
        }
-       qentry->have_irq = 1;
-       qentry->rsp = rsp;
 
        /* Enable MSI-X vector for response queue update for queue 0 */
-       if (ha->max_queues > 1 && ha->mqiobase) {
+       if (ha->mqiobase &&  (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
                ha->mqenable = 1;
-               msix_queue = &multi_rsp_queue;
-               qla_printk(KERN_INFO, ha,
-                               "MQ enabled, Number of Queue Resources: %d \n",
-                               ha->max_queues);
-       } else {
-               ha->mqenable = 0;
-               msix_queue = &base_rsp_queue;
-       }
-
-       qentry = &ha->msix_entries[1];
-       ret = request_irq(qentry->vector, msix_queue->handler, 0,
-                                               msix_queue->name, rsp);
-       if (ret) {
-               qla_printk(KERN_WARNING, ha,
-                       "MSI-X: Unable to register handler -- %x/%d.\n",
-                       qentry->vector, ret);
-               qla24xx_disable_msix(ha);
-               ha->mqenable = 0;
-               goto msix_out;
-       }
-       qentry->have_irq = 1;
-       qentry->rsp = rsp;
 
 msix_out:
        kfree(entries);
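Folding the three named qla_init_msix_entry variables into one msix_entries[] table lets the two base vectors be requested in a loop, and qla25xx_request_irq() later picks the multiqueue handler by index (&msix_entries[2]). A user-space sketch of that table-driven registration, with hypothetical handler stubs:

#include <stdio.h>

typedef int (*irq_handler_t)(int irq, void *dev_id);

struct init_entry {
	const char *name;
	irq_handler_t handler;
};

/* Stub handlers standing in for the qla24xx/25xx ISRs. */
static int h_default(int irq, void *d) { (void)irq; (void)d; return 1; }
static int h_rsp_q(int irq, void *d) { (void)irq; (void)d; return 1; }
static int h_multiq(int irq, void *d) { (void)irq; (void)d; return 1; }

static const struct init_entry entries[3] = {
	{ "qla2xxx (default)", h_default },
	{ "qla2xxx (rsp_q)", h_rsp_q },
	{ "qla2xxx (multiq)", h_multiq },
};

int main(void)
{
	int i;

	/* Base queue: register the first two vectors in a loop, the way
	 * qla24xx_enable_msix() now does.
	 */
	for (i = 0; i < 2; i++)
		printf("request_irq(vec%d, %s)\n", i, entries[i].name);

	/* Extra response queues reuse the third entry by index. */
	printf("request_irq(vecN, %s)\n", entries[2].name);
	return 0;
}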
@@ -2063,35 +2036,11 @@ qla2x00_free_irqs(scsi_qla_host_t *vha)
        }
 }
 
-static struct scsi_qla_host *
-qla2x00_get_rsp_host(struct rsp_que *rsp)
-{
-       srb_t *sp;
-       struct qla_hw_data *ha = rsp->hw;
-       struct scsi_qla_host *vha = NULL;
-       struct sts_entry_24xx *pkt;
-       struct req_que *req;
-
-       if (rsp->id) {
-               pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
-               req = rsp->req;
-               if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
-                       sp = req->outstanding_cmds[pkt->handle];
-                       if (sp)
-                               vha = sp->fcport->vha;
-               }
-       }
-       if (!vha)
-       /* handle it in base queue */
-               vha = pci_get_drvdata(ha->pdev);
-
-       return vha;
-}
 
 int qla25xx_request_irq(struct rsp_que *rsp)
 {
        struct qla_hw_data *ha = rsp->hw;
-       struct qla_init_msix_entry *intr = &multi_rsp_queue;
+       struct qla_init_msix_entry *intr = &msix_entries[2];
        struct qla_msix_entry *msix = rsp->msix;
        int ret;
 
@@ -2106,3 +2055,30 @@ int qla25xx_request_irq(struct rsp_que *rsp)
        msix->rsp = rsp;
        return ret;
 }
+
+struct scsi_qla_host *
+qla25xx_get_host(struct rsp_que *rsp)
+{
+       srb_t *sp;
+       struct qla_hw_data *ha = rsp->hw;
+       struct scsi_qla_host *vha = NULL;
+       struct sts_entry_24xx *pkt;
+       struct req_que *req;
+       uint16_t que;
+       uint32_t handle;
+
+       pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
+       que = MSW(pkt->handle);
+       handle = (uint32_t) LSW(pkt->handle);
+       req = ha->req_q_map[que];
+       if (handle < MAX_OUTSTANDING_COMMANDS) {
+               sp = req->outstanding_cmds[handle];
+               if (sp)
+                       return sp->fcport->vha;
+               else
+                       goto base_que;
+       }
+base_que:
+       vha = pci_get_drvdata(ha->pdev);
+       return vha;
+}
index e67c1660bf4671aacf54c43e35dca5182b81fc33..451ece0760b0f6ea49bb7884f5f95b2ac5528783 100644 (file)
@@ -408,7 +408,7 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
  * Context:
  *     Kernel context.
  */
-void
+int
 qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
     uint16_t *subminor, uint16_t *attributes, uint32_t *memory, uint8_t *mpi,
     uint32_t *mpi_caps, uint8_t *phy)
@@ -427,6 +427,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
        mcp->flags = 0;
        mcp->tov = MBX_TOV_SECONDS;
        rval = qla2x00_mailbox_command(vha, mcp);
+       if (rval != QLA_SUCCESS)
+               goto failed;
 
        /* Return mailbox data. */
        *major = mcp->mb[1];
@@ -446,7 +448,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
                phy[1] = mcp->mb[9] >> 8;
                phy[2] = mcp->mb[9] & 0xff;
        }
-
+failed:
        if (rval != QLA_SUCCESS) {
                /*EMPTY*/
                DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
@@ -455,6 +457,7 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha, uint16_t *major, uint16_t *minor,
                /*EMPTY*/
                DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
        }
+       return rval;
 }
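With qla2x00_get_fw_version() now returning int, qla2x00_setup_chip() can bail out through its new "goto failed" rather than consume stale version fields after a mailbox failure. The early-exit shape, reduced to a sketch:

#include <stdio.h>

#define QLA_SUCCESS 0
#define QLA_FUNCTION_FAILED 1

/* Stub mailbox command; made to fail so the early exit is visible. */
static int mailbox_command(void) { return QLA_FUNCTION_FAILED; }

static int get_fw_version(unsigned short *major)
{
	int rval = mailbox_command();

	if (rval != QLA_SUCCESS)
		goto failed;	/* leave *major untouched */
	*major = 5;		/* would come from mcp->mb[1] */
failed:
	return rval;
}

int main(void)
{
	unsigned short major = 0;

	if (get_fw_version(&major) != QLA_SUCCESS) {
		printf("setup chip failed, fw_major still %u\n", major);
		return 1;	/* the caller's "goto failed" path */
	}
	return 0;
}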
 
 /*
@@ -748,20 +751,20 @@ qla2x00_issue_iocb(scsi_qla_host_t *vha, void *buffer, dma_addr_t phys_addr,
  *     Kernel context.
  */
 int
-qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
+qla2x00_abort_command(srb_t *sp)
 {
        unsigned long   flags = 0;
-       fc_port_t       *fcport;
        int             rval;
        uint32_t        handle = 0;
        mbx_cmd_t       mc;
        mbx_cmd_t       *mcp = &mc;
+       fc_port_t       *fcport = sp->fcport;
+       scsi_qla_host_t *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = vha->req;
 
        DEBUG11(printk("qla2x00_abort_command(%ld): entered.\n", vha->host_no));
 
-       fcport = sp->fcport;
-
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
                if (req->outstanding_cmds[handle] == sp)
@@ -800,7 +803,7 @@ qla2x00_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
 }
 
 int
-qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
+qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
 {
        int rval, rval2;
        mbx_cmd_t  mc;
@@ -813,8 +816,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
 
        l = l;
        vha = fcport->vha;
-       req = vha->hw->req_q_map[0];
-       rsp = vha->hw->rsp_q_map[0];
+       req = vha->hw->req_q_map[tag];
+       rsp = vha->hw->rsp_q_map[tag];
        mcp->mb[0] = MBC_ABORT_TARGET;
        mcp->out_mb = MBX_9|MBX_2|MBX_1|MBX_0;
        if (HAS_EXTENDED_IDS(vha->hw)) {
@@ -850,7 +853,7 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l)
 }
 
 int
-qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
+qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
 {
        int rval, rval2;
        mbx_cmd_t  mc;
@@ -862,8 +865,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l)
        DEBUG11(printk("%s(%ld): entered.\n", __func__, fcport->vha->host_no));
 
        vha = fcport->vha;
-       req = vha->hw->req_q_map[0];
-       rsp = vha->hw->rsp_q_map[0];
+       req = vha->hw->req_q_map[tag];
+       rsp = vha->hw->rsp_q_map[tag];
        mcp->mb[0] = MBC_LUN_RESET;
        mcp->out_mb = MBX_9|MBX_3|MBX_2|MBX_1|MBX_0;
        if (HAS_EXTENDED_IDS(vha->hw))
@@ -931,6 +934,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
        mcp->mb[9] = vha->vp_idx;
        mcp->out_mb = MBX_9|MBX_0;
        mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+       if (IS_QLA81XX(vha->hw))
+               mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -952,9 +957,19 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
                DEBUG2_3_11(printk("qla2x00_get_adapter_id(%ld): failed=%x.\n",
                    vha->host_no, rval));
        } else {
-               /*EMPTY*/
                DEBUG11(printk("qla2x00_get_adapter_id(%ld): done.\n",
                    vha->host_no));
+
+               if (IS_QLA81XX(vha->hw)) {
+                       vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
+                       vha->fcoe_fcf_idx = mcp->mb[10];
+                       vha->fcoe_vn_port_mac[5] = mcp->mb[11] >> 8;
+                       vha->fcoe_vn_port_mac[4] = mcp->mb[11] & 0xff;
+                       vha->fcoe_vn_port_mac[3] = mcp->mb[12] >> 8;
+                       vha->fcoe_vn_port_mac[2] = mcp->mb[12] & 0xff;
+                       vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
+                       vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
+               }
        }
 
        return rval;
@@ -1252,7 +1267,7 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
 
        mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
        mcp->out_mb = MBX_0;
-       mcp->in_mb = MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->tov = MBX_TOV_SECONDS;
        mcp->flags = 0;
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -1261,6 +1276,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
        states[0] = mcp->mb[1];
        states[1] = mcp->mb[2];
        states[2] = mcp->mb[3];
+       states[3] = mcp->mb[4];
+       states[4] = mcp->mb[5];
 
        if (rval != QLA_SUCCESS) {
                /*EMPTY*/
@@ -1480,9 +1497,17 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        dma_addr_t      lg_dma;
        uint32_t        iop[2];
        struct qla_hw_data *ha = vha->hw;
+       struct req_que *req;
+       struct rsp_que *rsp;
 
        DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
+       if (ql2xmultique_tag)
+               req = ha->req_q_map[0];
+       else
+               req = vha->req;
+       rsp = req->rsp;
+
        lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
        if (lg == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Login IOCB.\n",
@@ -1493,6 +1518,7 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
 
        lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        lg->entry_count = 1;
+       lg->handle = MAKE_HANDLE(req->id, lg->handle);
        lg->nport_handle = cpu_to_le16(loop_id);
        lg->control_flags = __constant_cpu_to_le16(LCF_COMMAND_PLOGI);
        if (opt & BIT_0)
@@ -1741,6 +1767,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        struct logio_entry_24xx *lg;
        dma_addr_t      lg_dma;
        struct qla_hw_data *ha = vha->hw;
+       struct req_que *req;
+       struct rsp_que *rsp;
 
        DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
@@ -1752,8 +1780,14 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
        }
        memset(lg, 0, sizeof(struct logio_entry_24xx));
 
+       if (ql2xmaxqueues > 1)
+               req = ha->req_q_map[0];
+       else
+               req = vha->req;
+       rsp = req->rsp;
        lg->entry_type = LOGINOUT_PORT_IOCB_TYPE;
        lg->entry_count = 1;
+       lg->handle = MAKE_HANDLE(req->id, lg->handle);
        lg->nport_handle = cpu_to_le16(loop_id);
        lg->control_flags =
            __constant_cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
@@ -1864,9 +1898,6 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
        mbx_cmd_t mc;
        mbx_cmd_t *mcp = &mc;
 
-       if (IS_QLA81XX(vha->hw))
-           return QLA_SUCCESS;
-
        DEBUG11(printk("qla2x00_full_login_lip(%ld): entered.\n",
            vha->host_no));
 
@@ -2195,21 +2226,21 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
 }
 
 int
-qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
+qla24xx_abort_command(srb_t *sp)
 {
        int             rval;
-       fc_port_t       *fcport;
        unsigned long   flags = 0;
 
        struct abort_entry_24xx *abt;
        dma_addr_t      abt_dma;
        uint32_t        handle;
+       fc_port_t       *fcport = sp->fcport;
+       struct scsi_qla_host *vha = fcport->vha;
        struct qla_hw_data *ha = vha->hw;
+       struct req_que *req = vha->req;
 
        DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
-       fcport = sp->fcport;
-
        spin_lock_irqsave(&ha->hardware_lock, flags);
        for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
                if (req->outstanding_cmds[handle] == sp)
@@ -2231,6 +2262,7 @@ qla24xx_abort_command(scsi_qla_host_t *vha, srb_t *sp, struct req_que *req)
 
        abt->entry_type = ABORT_IOCB_TYPE;
        abt->entry_count = 1;
+       abt->handle = MAKE_HANDLE(req->id, abt->handle);
        abt->nport_handle = cpu_to_le16(fcport->loop_id);
        abt->handle_to_abort = handle;
        abt->port_id[0] = fcport->d_id.b.al_pa;
@@ -2272,7 +2304,7 @@ struct tsk_mgmt_cmd {
 
 static int
 __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
-    unsigned int l)
+    unsigned int l, int tag)
 {
        int             rval, rval2;
        struct tsk_mgmt_cmd *tsk;
@@ -2286,8 +2318,11 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 
        vha = fcport->vha;
        ha = vha->hw;
-       req = ha->req_q_map[0];
-       rsp = ha->rsp_q_map[0];
+       req = vha->req;
+       if (ql2xmultique_tag)
+               rsp = ha->rsp_q_map[tag + 1];
+       else
+               rsp = req->rsp;
        tsk = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &tsk_dma);
        if (tsk == NULL) {
                DEBUG2_3(printk("%s(%ld): failed to allocate Task Management "
@@ -2298,6 +2333,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 
        tsk->p.tsk.entry_type = TSK_MGMT_IOCB_TYPE;
        tsk->p.tsk.entry_count = 1;
+       tsk->p.tsk.handle = MAKE_HANDLE(req->id, tsk->p.tsk.handle);
        tsk->p.tsk.nport_handle = cpu_to_le16(fcport->loop_id);
        tsk->p.tsk.timeout = cpu_to_le16(ha->r_a_tov / 10 * 2);
        tsk->p.tsk.control_flags = cpu_to_le32(type);
@@ -2344,15 +2380,15 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
 }
 
 int
-qla24xx_abort_target(struct fc_port *fcport, unsigned int l)
+qla24xx_abort_target(struct fc_port *fcport, unsigned int l, int tag)
 {
-       return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l);
+       return __qla24xx_issue_tmf("Target", TCF_TARGET_RESET, fcport, l, tag);
 }
 
 int
-qla24xx_lun_reset(struct fc_port *fcport, unsigned int l)
+qla24xx_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
 {
-       return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l);
+       return __qla24xx_issue_tmf("Lun", TCF_LUN_RESET, fcport, l, tag);
 }
 
 int
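The MAKE_HANDLE() packing introduced in the hunks above is what lets a completion arriving on any response queue be routed back to the owning request queue: the queue id travels in the upper half of the 32-bit IOCB handle. A standalone sketch of the idea, with the macro body an assumption modeled on qla_def.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the driver's MAKE_HANDLE(): request-queue id in the
 * high 16 bits, per-queue command index in the low 16 bits. */
#define MAKE_HANDLE(q, h)	((uint32_t)((((uint32_t)(q)) << 16) | (uint32_t)(h)))

int main(void)
{
	uint32_t handle = MAKE_HANDLE(2, 0x0031);	/* req queue 2, cmd 0x31 */

	printf("queue=%u index=%u\n", handle >> 16, handle & 0xffff);
	return 0;	/* prints: queue=2 index=49 */
}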
@@ -2446,6 +2482,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
        if (rval != QLA_SUCCESS) {
                DEBUG2_3_11(printk("%s(%ld): failed=%x.\n", __func__,
                    vha->host_no, rval));
+               if (mcp->mb[0] == MBS_INVALID_COMMAND)
+                       rval = QLA_INVALID_COMMAND;
        } else {
                DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
        }
@@ -2717,8 +2755,11 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
                if (vp_idx == 0)
                        return;
 
-               if (MSB(stat) == 1)
+               if (MSB(stat) == 1) {
+                       DEBUG2(printk("scsi(%ld): Could not acquire ID for "
+                           "VP[%d].\n", vha->host_no, vp_idx));
                        return;
+               }
 
                list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
                        if (vp_idx == vp->vp_idx)
@@ -3141,6 +3182,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
                WRT_REG_DWORD(&reg->req_q_in, 0);
                WRT_REG_DWORD(&reg->req_q_out, 0);
        }
+       req->req_q_in = &reg->req_q_in;
+       req->req_q_out = &reg->req_q_out;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
        rval = qla2x00_mailbox_command(vha, mcp);
@@ -3167,7 +3210,6 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        mcp->mb[6] = MSW(MSD(rsp->dma));
        mcp->mb[7] = LSW(MSD(rsp->dma));
        mcp->mb[5] = rsp->length;
-       mcp->mb[11] = rsp->vp_idx;
        mcp->mb[14] = rsp->msix->entry;
        mcp->mb[13] = rsp->rid;
 
@@ -3179,7 +3221,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
        mcp->mb[8] = 0;
        /* que out ptr index */
        mcp->mb[9] = 0;
-       mcp->out_mb = MBX_14|MBX_13|MBX_12|MBX_11|MBX_10|MBX_9|MBX_8|MBX_7
+       mcp->out_mb = MBX_14|MBX_13|MBX_9|MBX_8|MBX_7
                        |MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
        mcp->in_mb = MBX_0;
        mcp->flags = MBX_DMA_OUT;
@@ -3384,7 +3426,7 @@ qla2x00_read_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
                DEBUG2_3_11(printk("%s(%ld): failed=%x (%x).\n", __func__,
                    vha->host_no, rval, mcp->mb[0]));
        } else {
-               DEBUG11(printk("%s(%ld): done.\n", __func__, ha->host_no));
+               DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
        }
 
        return rval;
@@ -3428,3 +3470,141 @@ qla2x00_write_edc(scsi_qla_host_t *vha, uint16_t dev, uint16_t adr,
 
        return rval;
 }
+
+int
+qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
+    uint16_t size_in_bytes, uint16_t *actual_size)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_QLA81XX(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+       mcp->mb[0] = MBC_GET_XGMAC_STATS;
+       mcp->mb[2] = MSW(stats_dma);
+       mcp->mb[3] = LSW(stats_dma);
+       mcp->mb[6] = MSW(MSD(stats_dma));
+       mcp->mb[7] = LSW(MSD(stats_dma));
+       mcp->mb[8] = size_in_bytes >> 2;
+       mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_0;
+       mcp->in_mb = MBX_2|MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
+                   "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
+                   mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+       } else {
+               DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+
+               *actual_size = mcp->mb[2] << 2;
+       }
+
+       return rval;
+}
+
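Note the unit convention in qla2x00_get_xgmac_stats() above: buffer sizes cross the mailbox interface in 32-bit words, so the driver shifts bytes down by two on the way out (mb[8]) and shifts the firmware's word count back up on the way in (mb[2]). A minimal round trip, assuming a dword-aligned buffer:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t size_in_bytes = 4096;
	uint16_t words = size_in_bytes >> 2;	/* driver -> firmware, mb[8] */
	uint16_t actual = words << 2;		/* firmware -> driver, mb[2] */

	assert(actual == size_in_bytes);	/* exact only for multiples of 4 */
	return 0;
}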
+int
+qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
+    uint16_t size)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_QLA81XX(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+       mcp->mb[0] = MBC_GET_DCBX_PARAMS;
+       mcp->mb[1] = 0;
+       mcp->mb[2] = MSW(tlv_dma);
+       mcp->mb[3] = LSW(tlv_dma);
+       mcp->mb[6] = MSW(MSD(tlv_dma));
+       mcp->mb[7] = LSW(MSD(tlv_dma));
+       mcp->mb[8] = size;
+       mcp->out_mb = MBX_8|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_2|MBX_1|MBX_0;
+       mcp->tov = MBX_TOV_SECONDS;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+
+       if (rval != QLA_SUCCESS) {
+               DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=0x%x "
+                   "mb[1]=0x%x mb[2]=0x%x.\n", __func__, vha->host_no, rval,
+                   mcp->mb[0], mcp->mb[1], mcp->mb[2]));
+       } else {
+               DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+       }
+
+       return rval;
+}
+
+int
+qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_FWI2_CAPABLE(vha->hw))
+               return QLA_FUNCTION_FAILED;
+
+       DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+       mcp->mb[0] = MBC_READ_RAM_EXTENDED;
+       mcp->mb[1] = LSW(risc_addr);
+       mcp->mb[8] = MSW(risc_addr);
+       mcp->out_mb = MBX_8|MBX_1|MBX_0;
+       mcp->in_mb = MBX_3|MBX_2|MBX_0;
+       mcp->tov = 30;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+                   vha->host_no, rval, mcp->mb[0]));
+       } else {
+               DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+               *data = mcp->mb[3] << 16 | mcp->mb[2];
+       }
+
+       return rval;
+}
+
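The mb[] assignments in these new routines split 32- and 64-bit values across 16-bit mailbox registers. A self-contained sketch; the macro bodies are assumptions modeled on the LSW/MSW/LSD/MSD helpers in qla_def.h:

#include <stdint.h>
#include <stdio.h>

#define LSW(x)	((uint16_t)(x))
#define MSW(x)	((uint16_t)((uint32_t)(x) >> 16))
#define LSD(x)	((uint32_t)(x))
#define MSD(x)	((uint32_t)((uint64_t)(x) >> 32))

int main(void)
{
	uint64_t dma = 0x123456789abcdef0ULL;	/* example 64-bit DMA address */

	/* Low dword goes to mb[2]/mb[3], high dword to mb[6]/mb[7]. */
	printf("mb2=%04x mb3=%04x mb6=%04x mb7=%04x\n",
	    MSW(LSD(dma)), LSW(LSD(dma)), MSW(MSD(dma)), LSW(MSD(dma)));
	return 0;	/* mb2=9abc mb3=def0 mb6=1234 mb7=5678 */
}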
+int
+qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
+{
+       int rval;
+       mbx_cmd_t mc;
+       mbx_cmd_t *mcp = &mc;
+
+       if (!IS_FWI2_CAPABLE(vha->hw))
+                return QLA_FUNCTION_FAILED;
+
+       DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
+
+       mcp->mb[0] = MBC_WRITE_RAM_WORD_EXTENDED;
+       mcp->mb[1] = LSW(risc_addr);
+       mcp->mb[2] = LSW(data);
+       mcp->mb[3] = MSW(data);
+       mcp->mb[8] = MSW(risc_addr);
+       mcp->out_mb = MBX_8|MBX_3|MBX_2|MBX_1|MBX_0;
+       mcp->in_mb = MBX_0;
+       mcp->tov = 30;
+       mcp->flags = 0;
+       rval = qla2x00_mailbox_command(vha, mcp);
+       if (rval != QLA_SUCCESS) {
+               DEBUG2_3_11(printk("%s(%ld): failed=%x mb[0]=%x.\n", __func__,
+                   vha->host_no, rval, mcp->mb[0]));
+       } else {
+               DEBUG11(printk("%s(%ld): done.\n", __func__, vha->host_no));
+       }
+
+       return rval;
+}
index 51716c7e30083495ebcc59ed387fd498472ac181..650bcef08f2a4d080f5e325a7fd82766179aeb7b 100644
@@ -398,9 +398,8 @@ qla24xx_create_vhost(struct fc_vport *fc_vport)
 
        qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL);
 
-       memset(vha->req_ques, 0, sizeof(vha->req_ques));
-       vha->req_ques[0] = ha->req_q_map[0]->id;
-       host->can_queue = ha->req_q_map[0]->length + 128;
+       vha->req = base_vha->req;
+       host->can_queue = base_vha->req->length + 128;
        host->this_id = 255;
        host->cmd_per_lun = 3;
        host->max_cmd_len = MAX_CMDSZ;
@@ -515,76 +514,53 @@ int qla25xx_update_req_que(struct scsi_qla_host *vha, uint8_t que, uint8_t qos)
 
 /* Delete all queues for a given vhost */
 int
-qla25xx_delete_queues(struct scsi_qla_host *vha, uint8_t que_no)
+qla25xx_delete_queues(struct scsi_qla_host *vha)
 {
        int cnt, ret = 0;
        struct req_que *req = NULL;
        struct rsp_que *rsp = NULL;
        struct qla_hw_data *ha = vha->hw;
 
-       if (que_no) {
-       /* Delete request queue */
-               req = ha->req_q_map[que_no];
+       /* Delete request queues */
+       for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
+               req = ha->req_q_map[cnt];
                if (req) {
-                       rsp = req->rsp;
                        ret = qla25xx_delete_req_que(vha, req);
                        if (ret != QLA_SUCCESS) {
                                qla_printk(KERN_WARNING, ha,
-                               "Couldn't delete req que %d\n", req->id);
+                               "Couldn't delete req que %d\n",
+                               req->id);
                                return ret;
                        }
-                       /* Delete associated response queue */
-                       if (rsp) {
-                               ret = qla25xx_delete_rsp_que(vha, rsp);
-                               if (ret != QLA_SUCCESS) {
-                                       qla_printk(KERN_WARNING, ha,
-                                               "Couldn't delete rsp que %d\n",
-                                               rsp->id);
-                                       return ret;
-                               }
-                       }
                }
-       } else {  /* delete all queues of this host */
-               for (cnt = 0; cnt < QLA_MAX_HOST_QUES; cnt++) {
-                       /* Delete request queues */
-                       req = ha->req_q_map[vha->req_ques[cnt]];
-                       if (req && req->id) {
-                               rsp = req->rsp;
-                               ret = qla25xx_delete_req_que(vha, req);
-                               if (ret != QLA_SUCCESS) {
-                                       qla_printk(KERN_WARNING, ha,
-                                               "Couldn't delete req que %d\n",
-                                               vha->req_ques[cnt]);
-                                       return ret;
-                               }
-                               vha->req_ques[cnt] = ha->req_q_map[0]->id;
-                       /* Delete associated response queue */
-                               if (rsp && rsp->id) {
-                                       ret = qla25xx_delete_rsp_que(vha, rsp);
-                                       if (ret != QLA_SUCCESS) {
-                                               qla_printk(KERN_WARNING, ha,
-                                               "Couldn't delete rsp que %d\n",
-                                               rsp->id);
-                                               return ret;
-                                       }
-                               }
+       }
+
+       /* Delete response queues */
+       for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
+               rsp = ha->rsp_q_map[cnt];
+               if (rsp) {
+                       ret = qla25xx_delete_rsp_que(vha, rsp);
+                       if (ret != QLA_SUCCESS) {
+                               qla_printk(KERN_WARNING, ha,
+                               "Couldn't delete rsp que %d\n",
+                               rsp->id);
+                               return ret;
                        }
                }
        }
-       qla_printk(KERN_INFO, ha, "Queues deleted for vport:%d\n",
-               vha->vp_idx);
        return ret;
 }
 
 int
 qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
-       uint8_t vp_idx, uint16_t rid, uint8_t rsp_que, uint8_t qos)
+       uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos)
 {
        int ret = 0;
        struct req_que *req = NULL;
        struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
        uint16_t que_id = 0;
        device_reg_t __iomem *reg;
+       uint32_t cnt;
 
        req = kzalloc(sizeof(struct req_que), GFP_KERNEL);
        if (req == NULL) {
@@ -604,8 +580,8 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        }
 
        mutex_lock(&ha->vport_lock);
-       que_id = find_first_zero_bit(ha->req_qid_map, ha->max_queues);
-       if (que_id >= ha->max_queues) {
+       que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues);
+       if (que_id >= ha->max_req_queues) {
                mutex_unlock(&ha->vport_lock);
                qla_printk(KERN_INFO, ha, "No resources to create "
                         "additional request queue\n");
@@ -617,10 +593,10 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        req->vp_idx = vp_idx;
        req->qos = qos;
 
-       if (ha->rsp_q_map[rsp_que]) {
+       if (rsp_que < 0)
+               req->rsp = NULL;
+       else
                req->rsp = ha->rsp_q_map[rsp_que];
-               req->rsp->req = req;
-       }
        /* Use alternate PCI bus number */
        if (MSB(req->rid))
                options |= BIT_4;
@@ -628,13 +604,16 @@ qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options,
        if (LSB(req->rid))
                options |= BIT_5;
        req->options = options;
+
+       for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++)
+               req->outstanding_cmds[cnt] = NULL;
+       req->current_outstanding_cmd = 1;
+
        req->ring_ptr = req->ring;
        req->ring_index = 0;
        req->cnt = req->length;
        req->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
-       req->req_q_in = &reg->isp25mq.req_q_in;
-       req->req_q_out = &reg->isp25mq.req_q_out;
        req->max_q_depth = ha->req_q_map[0]->max_q_depth;
        mutex_unlock(&ha->vport_lock);
 
@@ -654,10 +633,19 @@ que_failed:
        return 0;
 }
 
+static void qla_do_work(struct work_struct *work)
+{
+       struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
+       struct scsi_qla_host *vha;
+
+       vha = qla25xx_get_host(rsp);
+       qla24xx_process_response_queue(vha, rsp);
+}
+
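qla_do_work() is only the consumer half. A hedged sketch of the assumed producer side: under ql2xmultique_tag the per-response-queue interrupt handler defers processing to the workqueue created in qla25xx_setup_mode(); the handler name and exact shape here are illustrative, not taken from this patch:

static irqreturn_t qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct rsp_que *rsp = dev_id;
	struct qla_hw_data *ha = rsp->hw;

	if (ha->wq)
		queue_work(ha->wq, &rsp->q_work);	/* runs qla_do_work() */
	return IRQ_HANDLED;
}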
 /* create response queue */
 int
 qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
-       uint8_t vp_idx, uint16_t rid)
+       uint8_t vp_idx, uint16_t rid, int req)
 {
        int ret = 0;
        struct rsp_que *rsp = NULL;
@@ -672,7 +660,7 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
                goto que_failed;
        }
 
-       rsp->length = RESPONSE_ENTRY_CNT_2300;
+       rsp->length = RESPONSE_ENTRY_CNT_MQ;
        rsp->ring = dma_alloc_coherent(&ha->pdev->dev,
                        (rsp->length + 1) * sizeof(response_t),
                        &rsp->dma, GFP_KERNEL);
@@ -683,8 +671,8 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        }
 
        mutex_lock(&ha->vport_lock);
-       que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_queues);
-       if (que_id >= ha->max_queues) {
+       que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues);
+       if (que_id >= ha->max_rsp_queues) {
                mutex_unlock(&ha->vport_lock);
                qla_printk(KERN_INFO, ha, "No resources to create "
                         "additional response queue\n");
@@ -708,8 +696,6 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
        if (LSB(rsp->rid))
                options |= BIT_5;
        rsp->options = options;
-       rsp->ring_ptr = rsp->ring;
-       rsp->ring_index = 0;
        rsp->id = que_id;
        reg = ISP_QUE_REG(ha, que_id);
        rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
@@ -728,9 +714,14 @@ qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options,
                mutex_unlock(&ha->vport_lock);
                goto que_failed;
        }
+       if (req >= 0)
+               rsp->req = ha->req_q_map[req];
+       else
+               rsp->req = NULL;
 
        qla2x00_init_response_q_entries(rsp);
-
+       if (rsp->hw->wq)
+               INIT_WORK(&rsp->q_work, qla_do_work);
        return rsp->id;
 
 que_failed:
@@ -744,14 +735,16 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
        uint16_t options = 0;
        uint8_t ret = 0;
        struct qla_hw_data *ha = vha->hw;
+       struct rsp_que *rsp;
 
        options |= BIT_1;
-       ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0);
+       ret = qla25xx_create_rsp_que(ha, options, vha->vp_idx, 0, -1);
        if (!ret) {
                qla_printk(KERN_WARNING, ha, "Response Que create failed\n");
                return ret;
        } else
                qla_printk(KERN_INFO, ha, "Response Que:%d created.\n", ret);
+       rsp = ha->rsp_q_map[ret];
 
        options = 0;
        if (qos & BIT_7)
@@ -759,10 +752,11 @@ qla25xx_create_queues(struct scsi_qla_host *vha, uint8_t qos)
        ret = qla25xx_create_req_que(ha, options, vha->vp_idx, 0, ret,
                                        qos & ~BIT_7);
        if (ret) {
-               vha->req_ques[0] = ret;
+               vha->req = ha->req_q_map[ret];
                qla_printk(KERN_INFO, ha, "Request Que:%d created.\n", ret);
        } else
                qla_printk(KERN_WARNING, ha, "Request Que create failed\n");
+       rsp->req = ha->req_q_map[ret];
 
        return ret;
 }
index e4fdcdad80d0b7c12c1be44b0eb75a834ef37d49..dcf011679c8bbab5de6ab3d0b0dc58af4527ec80 100644
@@ -77,6 +77,14 @@ module_param(ql2xmaxqdepth, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xmaxqdepth,
                "Maximum queue depth to report for target devices.");
 
+int ql2xqfulltracking = 1;
+module_param(ql2xqfulltracking, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xqfulltracking,
+               "Controls whether the driver tracks queue full status "
+               "returns and dynamically adjusts a scsi device's queue "
+               "depth.  Default is 1, perform tracking.  Set to 0 to "
+               "disable dynamic tracking and adjustment of queue depth.");
+
 int ql2xqfullrampup = 120;
 module_param(ql2xqfullrampup, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xqfullrampup,
@@ -96,6 +104,23 @@ MODULE_PARM_DESC(ql2xmaxqueues,
                "Enables MQ settings "
                "Default is 1 for single queue. Set it to number \
                        of queues in MQ mode.");
+
+int ql2xmultique_tag;
+module_param(ql2xmultique_tag, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xmultique_tag,
+               "Enables CPU affinity settings for the driver "
+               "Default is 0 for no affinity of request and response IO. "
+               "Set it to 1 to turn on the cpu affinity.");
+
+int ql2xfwloadbin;
+module_param(ql2xfwloadbin, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xfwloadbin,
+               "Option to specify location from which to load ISP firmware:\n"
+               " 2 -- load firmware via the request_firmware() (hotplug)\n"
+               "      interface.\n"
+               " 1 -- load firmware from flash.\n"
+               " 0 -- use default semantics.\n");
+
 /*
  * SCSI host template entry points
  */
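All three new knobs are read at module init: ql2xmultique_tag=1 selects the CPU-affinity queue pairing set up in qla25xx_setup_mode() below, ql2xmaxqueues greater than 1 keeps the older QoS-style multiqueue, and ql2xfwloadbin overrides where the ISP firmware comes from. For example, loading with "modprobe qla2xxx ql2xmultique_tag=1 ql2xfwloadbin=2" combines affinity mode with request_firmware() loading.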
@@ -187,7 +212,7 @@ static void qla2x00_sp_free_dma(srb_t *);
 /* -------------------------------------------------------------------------- */
 static int qla2x00_alloc_queues(struct qla_hw_data *ha)
 {
-       ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_queues,
+       ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
                                GFP_KERNEL);
        if (!ha->req_q_map) {
                qla_printk(KERN_WARNING, ha,
@@ -195,7 +220,7 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
                goto fail_req_map;
        }
 
-       ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_queues,
+       ha->rsp_q_map = kzalloc(sizeof(struct rsp_que *) * ha->max_rsp_queues,
                                GFP_KERNEL);
        if (!ha->rsp_q_map) {
                qla_printk(KERN_WARNING, ha,
@@ -213,16 +238,8 @@ fail_req_map:
        return -ENOMEM;
 }
 
-static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
-       struct rsp_que *rsp)
+static void qla2x00_free_req_que(struct qla_hw_data *ha, struct req_que *req)
 {
-       if (rsp && rsp->ring)
-               dma_free_coherent(&ha->pdev->dev,
-               (rsp->length + 1) * sizeof(response_t),
-               rsp->ring, rsp->dma);
-
-       kfree(rsp);
-       rsp = NULL;
        if (req && req->ring)
                dma_free_coherent(&ha->pdev->dev,
                (req->length + 1) * sizeof(request_t),
@@ -232,22 +249,77 @@ static void qla2x00_free_que(struct qla_hw_data *ha, struct req_que *req,
        req = NULL;
 }
 
+static void qla2x00_free_rsp_que(struct qla_hw_data *ha, struct rsp_que *rsp)
+{
+       if (rsp && rsp->ring)
+               dma_free_coherent(&ha->pdev->dev,
+               (rsp->length + 1) * sizeof(response_t),
+               rsp->ring, rsp->dma);
+
+       kfree(rsp);
+       rsp = NULL;
+}
+
 static void qla2x00_free_queues(struct qla_hw_data *ha)
 {
        struct req_que *req;
        struct rsp_que *rsp;
        int cnt;
 
-       for (cnt = 0; cnt < ha->max_queues; cnt++) {
-               rsp = ha->rsp_q_map[cnt];
+       for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
                req = ha->req_q_map[cnt];
-               qla2x00_free_que(ha, req, rsp);
+               qla2x00_free_req_que(ha, req);
+       }
+       kfree(ha->req_q_map);
+       ha->req_q_map = NULL;
+
+       for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+               rsp = ha->rsp_q_map[cnt];
+               qla2x00_free_rsp_que(ha, rsp);
        }
        kfree(ha->rsp_q_map);
        ha->rsp_q_map = NULL;
+}
 
-       kfree(ha->req_q_map);
-       ha->req_q_map = NULL;
+static int qla25xx_setup_mode(struct scsi_qla_host *vha)
+{
+       uint16_t options = 0;
+       int ques, req, ret;
+       struct qla_hw_data *ha = vha->hw;
+
+       if (ql2xmultique_tag) {
+               /* CPU affinity mode */
+               ha->wq = create_workqueue("qla2xxx_wq");
+               /* create a request queue for IO */
+               options |= BIT_7;
+               req = qla25xx_create_req_que(ha, options, 0, 0, -1,
+                       QLA_DEFAULT_QUE_QOS);
+               if (!req) {
+                       qla_printk(KERN_WARNING, ha,
+                               "Can't create request queue\n");
+                       goto fail;
+               }
+               vha->req = ha->req_q_map[req];
+               options |= BIT_1;
+               for (ques = 1; ques < ha->max_rsp_queues; ques++) {
+                       ret = qla25xx_create_rsp_que(ha, options, 0, 0, req);
+                       if (!ret) {
+                               qla_printk(KERN_WARNING, ha,
+                                       "Response Queue create failed\n");
+                               goto fail2;
+                       }
+               }
+               DEBUG2(qla_printk(KERN_INFO, ha,
+                       "CPU affinity mode enabled, no. of response"
+                       " queues:%d, no. of request queues:%d\n",
+                       ha->max_rsp_queues, ha->max_req_queues));
+       }
+       return 0;
+fail2:
+       qla25xx_delete_queues(vha);
+fail:
+       ha->mqenable = 0;
+       return 1;
 }
 
 static char *
@@ -387,7 +459,6 @@ qla2x00_get_new_sp(scsi_qla_host_t *vha, fc_port_t *fcport,
 
        sp->fcport = fcport;
        sp->cmd = cmd;
-       sp->que = ha->req_q_map[0];
        sp->flags = 0;
        CMD_SP(cmd) = (void *)sp;
        cmd->scsi_done = done;
@@ -612,7 +683,7 @@ qla2x00_wait_for_loop_ready(scsi_qla_host_t *vha)
 void
 qla2x00_abort_fcport_cmds(fc_port_t *fcport)
 {
-       int cnt, que, id;
+       int cnt;
        unsigned long flags;
        srb_t *sp;
        scsi_qla_host_t *vha = fcport->vha;
@@ -620,32 +691,27 @@ qla2x00_abort_fcport_cmds(fc_port_t *fcport)
        struct req_que *req;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (que = 0; que < QLA_MAX_HOST_QUES; que++) {
-               id = vha->req_ques[que];
-               req = ha->req_q_map[id];
-               if (!req)
+       req = vha->req;
+       for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
+               sp = req->outstanding_cmds[cnt];
+               if (!sp)
+                       continue;
+               if (sp->fcport != fcport)
                        continue;
-               for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
-                       sp = req->outstanding_cmds[cnt];
-                       if (!sp)
-                               continue;
-                       if (sp->fcport != fcport)
-                               continue;
 
-                       spin_unlock_irqrestore(&ha->hardware_lock, flags);
-                       if (ha->isp_ops->abort_command(vha, sp, req)) {
+               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+               if (ha->isp_ops->abort_command(sp)) {
+                       DEBUG2(qla_printk(KERN_WARNING, ha,
+                       "Abort failed --  %lx\n",
+                       sp->cmd->serial_number));
+               } else {
+                       if (qla2x00_eh_wait_on_command(sp->cmd) !=
+                               QLA_SUCCESS)
                                DEBUG2(qla_printk(KERN_WARNING, ha,
-                               "Abort failed --  %lx\n",
+                               "Abort failed while waiting --  %lx\n",
                                sp->cmd->serial_number));
-                       } else {
-                               if (qla2x00_eh_wait_on_command(sp->cmd) !=
-                                       QLA_SUCCESS)
-                                       DEBUG2(qla_printk(KERN_WARNING, ha,
-                                       "Abort failed while waiting --  %lx\n",
-                                       sp->cmd->serial_number));
-                       }
-                       spin_lock_irqsave(&ha->hardware_lock, flags);
                }
+               spin_lock_irqsave(&ha->hardware_lock, flags);
        }
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
@@ -693,7 +759,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        unsigned long flags;
        int wait = 0;
        struct qla_hw_data *ha = vha->hw;
-       struct req_que *req;
+       struct req_que *req = vha->req;
        srb_t *spt;
 
        qla2x00_block_error_handler(cmd);
@@ -709,7 +775,6 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
        spt = (srb_t *) CMD_SP(cmd);
        if (!spt)
                return SUCCESS;
-       req = spt->que;
 
        /* Check active list for command. */
        spin_lock_irqsave(&ha->hardware_lock, flags);
@@ -726,7 +791,7 @@ qla2xxx_eh_abort(struct scsi_cmnd *cmd)
                " pid=%ld.\n", __func__, vha->host_no, sp, serial));
 
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
-               if (ha->isp_ops->abort_command(vha, sp, req)) {
+               if (ha->isp_ops->abort_command(sp)) {
                        DEBUG2(printk("%s(%ld): abort_command "
                        "mbx failed.\n", __func__, vha->host_no));
                        ret = FAILED;
@@ -777,7 +842,7 @@ qla2x00_eh_wait_for_pending_commands(scsi_qla_host_t *vha, unsigned int t,
                return status;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       req = sp->que;
+       req = vha->req;
        for (cnt = 1; status == QLA_SUCCESS &&
                cnt < MAX_OUTSTANDING_COMMANDS; cnt++) {
                sp = req->outstanding_cmds[cnt];
@@ -820,7 +885,7 @@ static char *reset_errors[] = {
 
 static int
 __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
-    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int))
+    struct scsi_cmnd *cmd, int (*do_reset)(struct fc_port *, unsigned int, int))
 {
        scsi_qla_host_t *vha = shost_priv(cmd->device->host);
        fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
@@ -841,7 +906,8 @@ __qla2xxx_eh_generic_reset(char *name, enum nexus_wait_type type,
        if (qla2x00_wait_for_loop_ready(vha) != QLA_SUCCESS)
                goto eh_reset_failed;
        err = 2;
-       if (do_reset(fcport, cmd->device->lun) != QLA_SUCCESS)
+       if (do_reset(fcport, cmd->device->lun, cmd->request->cpu + 1)
+               != QLA_SUCCESS)
                goto eh_reset_failed;
        err = 3;
        if (qla2x00_eh_wait_for_pending_commands(vha, cmd->device->id,
@@ -996,6 +1062,9 @@ qla2xxx_eh_host_reset(struct scsi_cmnd *cmd)
                if (qla2x00_vp_abort_isp(vha))
                        goto eh_host_reset_lock;
        } else {
+               if (ha->wq)
+                       flush_workqueue(ha->wq);
+
                set_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
                if (qla2x00_abort_isp(base_vha)) {
                        clear_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags);
@@ -1037,7 +1106,8 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
        struct fc_port *fcport;
        struct qla_hw_data *ha = vha->hw;
 
-       if (ha->flags.enable_lip_full_login && !vha->vp_idx) {
+       if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
+           !IS_QLA81XX(ha)) {
                ret = qla2x00_full_login_lip(vha);
                if (ret != QLA_SUCCESS) {
                        DEBUG2_3(printk("%s(%ld): failed: "
@@ -1064,7 +1134,7 @@ qla2x00_loop_reset(scsi_qla_host_t *vha)
                        if (fcport->port_type != FCT_TARGET)
                                continue;
 
-                       ret = ha->isp_ops->target_reset(fcport, 0);
+                       ret = ha->isp_ops->target_reset(fcport, 0, 0);
                        if (ret != QLA_SUCCESS) {
                                DEBUG2_3(printk("%s(%ld): bus_reset failed: "
                                    "target_reset=%d d_id=%x.\n", __func__,
@@ -1088,7 +1158,7 @@ qla2x00_abort_all_cmds(scsi_qla_host_t *vha, int res)
        struct req_que *req;
 
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       for (que = 0; que < ha->max_queues; que++) {
+       for (que = 0; que < ha->max_req_queues; que++) {
                req = ha->req_q_map[que];
                if (!req)
                        continue;
@@ -1123,7 +1193,7 @@ qla2xxx_slave_configure(struct scsi_device *sdev)
        scsi_qla_host_t *vha = shost_priv(sdev->host);
        struct qla_hw_data *ha = vha->hw;
        struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
-       struct req_que *req = ha->req_q_map[vha->req_ques[0]];
+       struct req_que *req = vha->req;
 
        if (sdev->tagged_supported)
                scsi_activate_tcq(sdev, req->max_q_depth);
@@ -1511,6 +1581,13 @@ qla2x00_set_isp_flags(struct qla_hw_data *ha)
                ha->fw_srisc_address = RISC_START_ADDRESS_2400;
                break;
        }
+
+       /* Get adapter physical port no from interrupt pin register. */
+       pci_read_config_byte(ha->pdev, PCI_INTERRUPT_PIN, &ha->port_no);
+       if (ha->port_no & 1)
+               ha->flags.port0 = 1;
+       else
+               ha->flags.port0 = 0;
 }
 
 static int
@@ -1518,6 +1595,7 @@ qla2x00_iospace_config(struct qla_hw_data *ha)
 {
        resource_size_t pio;
        uint16_t msix;
+       int cpus;
 
        if (pci_request_selected_regions(ha->pdev, ha->bars,
            QLA2XXX_DRIVER_NAME)) {
@@ -1571,8 +1649,9 @@ skip_pio:
        }
 
        /* Determine queue resources */
-       ha->max_queues = 1;
-       if (ql2xmaxqueues <= 1 || (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
+       ha->max_req_queues = ha->max_rsp_queues = 1;
+       if ((ql2xmaxqueues <= 1 || ql2xmultique_tag < 1) &&
+               (!IS_QLA25XX(ha) && !IS_QLA81XX(ha)))
                goto mqiobase_exit;
        ha->mqiobase = ioremap(pci_resource_start(ha->pdev, 3),
                        pci_resource_len(ha->pdev, 3));
@@ -1582,18 +1661,24 @@ skip_pio:
                ha->msix_count = msix;
                /* Max queues are bounded by available msix vectors */
                /* queue 0 uses two msix vectors */
-               if (ha->msix_count - 1 < ql2xmaxqueues)
-                       ha->max_queues = ha->msix_count - 1;
-               else if (ql2xmaxqueues > QLA_MQ_SIZE)
-                       ha->max_queues = QLA_MQ_SIZE;
-               else
-                       ha->max_queues = ql2xmaxqueues;
+               if (ql2xmultique_tag) {
+                       cpus = num_online_cpus();
+                       ha->max_rsp_queues = (ha->msix_count - 1 - cpus) ?
+                               (cpus + 1) : (ha->msix_count - 1);
+                       ha->max_req_queues = 2;
+               } else if (ql2xmaxqueues > 1) {
+                       ha->max_req_queues = ql2xmaxqueues > QLA_MQ_SIZE ?
+                                               QLA_MQ_SIZE : ql2xmaxqueues;
+                       DEBUG2(qla_printk(KERN_INFO, ha, "QoS mode set, max no"
+                       " of request queues:%d\n", ha->max_req_queues));
+               }
                qla_printk(KERN_INFO, ha,
                        "MSI-X vector count: %d\n", msix);
-       }
+       } else
+               qla_printk(KERN_INFO, ha, "BAR 3 not enabled\n");
 
 mqiobase_exit:
-       ha->msix_count = ha->max_queues + 1;
+       ha->msix_count = ha->max_rsp_queues + 1;
        return (0);
 
 iospace_error_exit:
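A worked instance of the sizing logic above, with illustrative numbers: 16 MSI-X vectors and 4 online CPUs leave one response queue per CPU plus the base queue, two request queues, and msix_count tracking the response-queue total:

#include <stdio.h>

int main(void)
{
	int msix_count = 16, cpus = 4;	/* illustrative values */
	int max_rsp_queues = (msix_count - 1 - cpus) ?
	    (cpus + 1) : (msix_count - 1);
	int max_req_queues = 2;		/* base queue plus one IO queue */

	printf("rsp=%d req=%d msix=%d\n",
	    max_rsp_queues, max_req_queues, max_rsp_queues + 1);
	return 0;	/* prints: rsp=5 req=2 msix=6 */
}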
@@ -1605,6 +1690,9 @@ qla2xxx_scan_start(struct Scsi_Host *shost)
 {
        scsi_qla_host_t *vha = shost_priv(shost);
 
+       if (vha->hw->flags.running_gold_fw)
+               return;
+
        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);
@@ -1768,6 +1856,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
                ha->gid_list_info_size = 8;
                ha->optrom_size = OPTROM_SIZE_81XX;
+               ha->nvram_npiv_size = QLA_MAX_VPORTS_QLA25XX;
                ha->isp_ops = &qla81xx_isp_ops;
                ha->flash_conf_off = FARX_ACCESS_FLASH_CONF_81XX;
                ha->flash_data_off = FARX_ACCESS_FLASH_DATA_81XX;
@@ -1803,14 +1892,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 
                ret = -ENOMEM;
                qla2x00_mem_free(ha);
-               qla2x00_free_que(ha, req, rsp);
+               qla2x00_free_req_que(ha, req);
+               qla2x00_free_rsp_que(ha, rsp);
                goto probe_hw_failed;
        }
 
        pci_set_drvdata(pdev, base_vha);
 
        host = base_vha->host;
-       base_vha->req_ques[0] = req->id;
+       base_vha->req = req;
        host->can_queue = req->length + 128;
        if (IS_QLA2XXX_MIDTYPE(ha))
                base_vha->mgmt_svr_loop_id = 10 + base_vha->vp_idx;
@@ -1841,7 +1931,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        }
        ha->rsp_q_map[0] = rsp;
        ha->req_q_map[0] = req;
-
+       rsp->req = req;
+       req->rsp = rsp;
+       set_bit(0, ha->req_qid_map);
+       set_bit(0, ha->rsp_qid_map);
        /* FWI2-capable only. */
        req->req_q_in = &ha->iobase->isp24.req_q_in;
        req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -1866,6 +1959,15 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto probe_failed;
        }
 
+       if (ha->mqenable)
+               if (qla25xx_setup_mode(base_vha))
+                       qla_printk(KERN_WARNING, ha,
+                               "Can't create queues, falling back to single"
+                               " queue mode\n");
+
+       if (ha->flags.running_gold_fw)
+               goto skip_dpc;
+
        /*
         * Startup the kernel thread for this host adapter
         */
@@ -1878,6 +1980,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto probe_failed;
        }
 
+skip_dpc:
        list_add_tail(&base_vha->list, &ha->vp_list);
        base_vha->host->irq = ha->pdev->irq;
 
@@ -1917,8 +2020,9 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        return 0;
 
 probe_init_failed:
-       qla2x00_free_que(ha, req, rsp);
-       ha->max_queues = 0;
+       qla2x00_free_req_que(ha, req);
+       qla2x00_free_rsp_que(ha, rsp);
+       ha->max_req_queues = ha->max_rsp_queues = 0;
 
 probe_failed:
        if (base_vha->timer_active)
@@ -1976,6 +2080,13 @@ qla2x00_remove_one(struct pci_dev *pdev)
 
        base_vha->flags.online = 0;
 
+       /* Flush the work queue and remove it */
+       if (ha->wq) {
+               flush_workqueue(ha->wq);
+               destroy_workqueue(ha->wq);
+               ha->wq = NULL;
+       }
+
        /* Kill the kernel thread for this host */
        if (ha->dpc_thread) {
                struct task_struct *t = ha->dpc_thread;
@@ -2017,6 +2128,8 @@ qla2x00_free_device(scsi_qla_host_t *vha)
 {
        struct qla_hw_data *ha = vha->hw;
 
+       qla25xx_delete_queues(vha);
+
        if (ha->flags.fce_enabled)
                qla2x00_disable_fce_trace(vha, NULL, NULL);
 
@@ -2329,6 +2442,14 @@ qla2x00_mem_free(struct qla_hw_data *ha)
                vfree(ha->fw_dump);
        }
 
+       if (ha->dcbx_tlv)
+               dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
+                   ha->dcbx_tlv, ha->dcbx_tlv_dma);
+
+       if (ha->xgmac_data)
+               dma_free_coherent(&ha->pdev->dev, XGMAC_DATA_SIZE,
+                   ha->xgmac_data, ha->xgmac_data_dma);
+
        if (ha->sns_cmd)
                dma_free_coherent(&ha->pdev->dev, sizeof(struct sns_cmd_pkt),
                ha->sns_cmd, ha->sns_cmd_dma);
@@ -2412,6 +2533,8 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
        INIT_LIST_HEAD(&vha->work_list);
        INIT_LIST_HEAD(&vha->list);
 
+       spin_lock_init(&vha->work_lock);
+
        sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
        return vha;
 
@@ -2420,13 +2543,11 @@ fail:
 }
 
 static struct qla_work_evt *
-qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
-    int locked)
+qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type)
 {
        struct qla_work_evt *e;
 
-       e = kzalloc(sizeof(struct qla_work_evt), locked ? GFP_ATOMIC:
-           GFP_KERNEL);
+       e = kzalloc(sizeof(struct qla_work_evt), GFP_ATOMIC);
        if (!e)
                return NULL;
 
@@ -2437,17 +2558,15 @@ qla2x00_alloc_work(struct scsi_qla_host *vha, enum qla_work_type type,
 }
 
 static int
-qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e, int locked)
+qla2x00_post_work(struct scsi_qla_host *vha, struct qla_work_evt *e)
 {
-       unsigned long uninitialized_var(flags);
-       struct qla_hw_data *ha = vha->hw;
+       unsigned long flags;
 
-       if (!locked)
-               spin_lock_irqsave(&ha->hardware_lock, flags);
+       spin_lock_irqsave(&vha->work_lock, flags);
        list_add_tail(&e->list, &vha->work_list);
+       spin_unlock_irqrestore(&vha->work_lock, flags);
        qla2xxx_wake_dpc(vha);
-       if (!locked)
-               spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
        return QLA_SUCCESS;
 }
 
@@ -2457,13 +2576,13 @@ qla2x00_post_aen_work(struct scsi_qla_host *vha, enum fc_host_event_code code,
 {
        struct qla_work_evt *e;
 
-       e = qla2x00_alloc_work(vha, QLA_EVT_AEN, 1);
+       e = qla2x00_alloc_work(vha, QLA_EVT_AEN);
        if (!e)
                return QLA_FUNCTION_FAILED;
 
        e->u.aen.code = code;
        e->u.aen.data = data;
-       return qla2x00_post_work(vha, e, 1);
+       return qla2x00_post_work(vha, e);
 }
 
 int
@@ -2471,25 +2590,27 @@ qla2x00_post_idc_ack_work(struct scsi_qla_host *vha, uint16_t *mb)
 {
        struct qla_work_evt *e;
 
-       e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK, 1);
+       e = qla2x00_alloc_work(vha, QLA_EVT_IDC_ACK);
        if (!e)
                return QLA_FUNCTION_FAILED;
 
        memcpy(e->u.idc_ack.mb, mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
-       return qla2x00_post_work(vha, e, 1);
+       return qla2x00_post_work(vha, e);
 }
 
 static void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
-       struct qla_work_evt *e;
-       struct qla_hw_data *ha = vha->hw;
+       struct qla_work_evt *e, *tmp;
+       unsigned long flags;
+       LIST_HEAD(work);
 
-       spin_lock_irq(&ha->hardware_lock);
-       while (!list_empty(&vha->work_list)) {
-               e = list_entry(vha->work_list.next, struct qla_work_evt, list);
+       spin_lock_irqsave(&vha->work_lock, flags);
+       list_splice_init(&vha->work_list, &work);
+       spin_unlock_irqrestore(&vha->work_lock, flags);
+
+       list_for_each_entry_safe(e, tmp, &work, list) {
                list_del_init(&e->list);
-               spin_unlock_irq(&ha->hardware_lock);
 
                switch (e->type) {
                case QLA_EVT_AEN:
@@ -2502,10 +2623,9 @@ qla2x00_do_work(struct scsi_qla_host *vha)
                }
                if (e->flags & QLA_EVT_FLAG_FREE)
                        kfree(e);
-               spin_lock_irq(&ha->hardware_lock);
        }
-       spin_unlock_irq(&ha->hardware_lock);
 }
+
 /* Relogins all the fcports of a vport
  * Context: dpc thread
  */
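The qla2x00_do_work() rework above swaps lock-while-processing for splice-and-drain: the new work_lock is held just long enough to steal the whole list, and every event is then handled without any driver lock. A standalone model of the pattern, with a minimal singly linked list standing in for list_head:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct evt { struct evt *next; int type; };

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;
static struct evt *work_list;

static void post_work(int type)
{
	struct evt *e = calloc(1, sizeof(*e));

	if (!e)
		return;
	e->type = type;
	pthread_mutex_lock(&work_lock);
	e->next = work_list;		/* LIFO for brevity; the driver is FIFO */
	work_list = e;
	pthread_mutex_unlock(&work_lock);
}

static void do_work(void)
{
	struct evt *e, *next;

	pthread_mutex_lock(&work_lock);
	e = work_list;			/* splice: steal the whole list... */
	work_list = NULL;
	pthread_mutex_unlock(&work_lock);

	for (; e; e = next) {		/* ...then process it unlocked */
		next = e->next;
		printf("event %d\n", e->type);
		free(e);
	}
}

int main(void)
{
	post_work(1);
	post_work(2);
	do_work();
	return 0;
}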
index 152ecfc26cd201e4ad0b304708e2962a7619afa2..6260505dceb5ff20fc00e8305734453c08872a38 100644
@@ -219,8 +219,8 @@ qla2x00_write_nvram_word(struct qla_hw_data *ha, uint32_t addr, uint16_t data)
        wait_cnt = NVR_WAIT_CNT;
        do {
                if (!--wait_cnt) {
-                       DEBUG9_10(printk("%s(%ld): NVRAM didn't go ready...\n",
-                           __func__, vha->host_no));
+                       DEBUG9_10(qla_printk(KERN_WARNING, ha,
+                           "NVRAM didn't go ready...\n"));
                        break;
                }
                NVRAM_DELAY();
@@ -349,7 +349,7 @@ qla2x00_clear_nvram_protection(struct qla_hw_data *ha)
                wait_cnt = NVR_WAIT_CNT;
                do {
                        if (!--wait_cnt) {
-                               DEBUG9_10(qla_printk(
+                               DEBUG9_10(qla_printk(KERN_WARNING, ha,
                                    "NVRAM didn't go ready...\n"));
                                break;
                        }
@@ -408,7 +408,8 @@ qla2x00_set_nvram_protection(struct qla_hw_data *ha, int stat)
        wait_cnt = NVR_WAIT_CNT;
        do {
                if (!--wait_cnt) {
-                       DEBUG9_10(qla_printk("NVRAM didn't go ready...\n"));
+                       DEBUG9_10(qla_printk(KERN_WARNING, ha,
+                           "NVRAM didn't go ready...\n"));
                        break;
                }
                NVRAM_DELAY();
@@ -701,32 +702,35 @@ qla2xxx_get_flt_info(scsi_qla_host_t *vha, uint32_t flt_addr)
                        break;
                case FLT_REG_VPD_0:
                        ha->flt_region_vpd_nvram = start;
-                       if (!(PCI_FUNC(ha->pdev->devfn) & 1))
+                       if (ha->flags.port0)
                                ha->flt_region_vpd = start;
                        break;
                case FLT_REG_VPD_1:
-                       if (PCI_FUNC(ha->pdev->devfn) & 1)
+                       if (!ha->flags.port0)
                                ha->flt_region_vpd = start;
                        break;
                case FLT_REG_NVRAM_0:
-                       if (!(PCI_FUNC(ha->pdev->devfn) & 1))
+                       if (ha->flags.port0)
                                ha->flt_region_nvram = start;
                        break;
                case FLT_REG_NVRAM_1:
-                       if (PCI_FUNC(ha->pdev->devfn) & 1)
+                       if (!ha->flags.port0)
                                ha->flt_region_nvram = start;
                        break;
                case FLT_REG_FDT:
                        ha->flt_region_fdt = start;
                        break;
                case FLT_REG_NPIV_CONF_0:
-                       if (!(PCI_FUNC(ha->pdev->devfn) & 1))
+                       if (ha->flags.port0)
                                ha->flt_region_npiv_conf = start;
                        break;
                case FLT_REG_NPIV_CONF_1:
-                       if (PCI_FUNC(ha->pdev->devfn) & 1)
+                       if (!ha->flags.port0)
                                ha->flt_region_npiv_conf = start;
                        break;
+               case FLT_REG_GOLD_FW:
+                       ha->flt_region_gold_fw = start;
+                       break;
                }
        }
        goto done;
@@ -744,12 +748,12 @@ no_flash_data:
        ha->flt_region_fw = def_fw[def];
        ha->flt_region_boot = def_boot[def];
        ha->flt_region_vpd_nvram = def_vpd_nvram[def];
-       ha->flt_region_vpd = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+       ha->flt_region_vpd = ha->flags.port0 ?
            def_vpd0[def]: def_vpd1[def];
-       ha->flt_region_nvram = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+       ha->flt_region_nvram = ha->flags.port0 ?
            def_nvram0[def]: def_nvram1[def];
        ha->flt_region_fdt = def_fdt[def];
-       ha->flt_region_npiv_conf = !(PCI_FUNC(ha->pdev->devfn) & 1) ?
+       ha->flt_region_npiv_conf = ha->flags.port0 ?
            def_npiv_conf0[def]: def_npiv_conf1[def];
 done:
        DEBUG2(qla_printk(KERN_DEBUG, ha, "FLT[%s]: boot=0x%x fw=0x%x "
@@ -924,6 +928,8 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
                struct fc_vport_identifiers vid;
                struct fc_vport *vport;
 
+               memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
+
                flags = le16_to_cpu(entry->flags);
                if (flags == 0xffff)
                        continue;
@@ -937,9 +943,7 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
                vid.port_name = wwn_to_u64(entry->port_name);
                vid.node_name = wwn_to_u64(entry->node_name);
 
-               memcpy(&ha->npiv_info[i], entry, sizeof(struct qla_npiv_entry));
-
-               DEBUG2(qla_printk(KERN_DEBUG, ha, "NPIV[%02x]: wwpn=%llx "
+               DEBUG2(qla_printk(KERN_INFO, ha, "NPIV[%02x]: wwpn=%llx "
                        "wwnn=%llx vf_id=0x%x Q_qos=0x%x F_qos=0x%x.\n", cnt,
                        vid.port_name, vid.node_name, le16_to_cpu(entry->vf_id),
                        entry->q_qos, entry->f_qos));
@@ -955,7 +959,6 @@ qla2xxx_flash_npiv_conf(scsi_qla_host_t *vha)
        }
 done:
        kfree(data);
-       ha->npiv_info = NULL;
 }
 
 static int
@@ -1079,8 +1082,9 @@ qla24xx_write_flash_data(scsi_qla_host_t *vha, uint32_t *dwptr, uint32_t faddr,
                                    0xff0000) | ((fdata >> 16) & 0xff));
                        ret = qla24xx_erase_sector(vha, fdata);
                        if (ret != QLA_SUCCESS) {
-                               DEBUG9(qla_printk("Unable to erase sector: "
-                                   "address=%x.\n", faddr));
+                               DEBUG9(qla_printk(KERN_WARNING, ha,
+                                   "Unable to erase sector: address=%x.\n",
+                                   faddr));
                                break;
                        }
                }
@@ -1240,8 +1244,9 @@ qla24xx_write_nvram_data(scsi_qla_host_t *vha, uint8_t *buf, uint32_t naddr,
                ret = qla24xx_write_flash_dword(ha,
                    nvram_data_addr(ha, naddr), cpu_to_le32(*dwptr));
                if (ret != QLA_SUCCESS) {
-                       DEBUG9(qla_printk("Unable to program nvram address=%x "
-                           "data=%x.\n", naddr, *dwptr));
+                       DEBUG9(qla_printk(KERN_WARNING, ha,
+                           "Unable to program nvram address=%x data=%x.\n",
+                           naddr, *dwptr));
                        break;
                }
        }
index 19d1afc3a34345fba4daa04f5c3f08959db5b891..b63feaf43126bd27f35c951453265c9e27c896bf 100644
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k1"
+#define QLA2XXX_VERSION      "8.03.01-k3"
 
 #define QLA_DRIVER_MAJOR_VER   8
 #define QLA_DRIVER_MINOR_VER   3
index 166417a6afbab0ceb2a30dec46a1cfddeb02a5a1..2de5f3ad640b2da78eba5401454c8415f44841e4 100644
@@ -1225,8 +1225,8 @@ EXPORT_SYMBOL(__scsi_device_lookup_by_target);
  * @starget:   SCSI target pointer
  * @lun:       SCSI Logical Unit Number
  *
- * Description: Looks up the scsi_device with the specified @channel, @id, @lun
- * for a given host.  The returned scsi_device has an additional reference that
+ * Description: Looks up the scsi_device with the specified @lun for a given
+ * @starget.  The returned scsi_device has an additional reference that
  * needs to be released with scsi_device_put once you're done with it.
  **/
 struct scsi_device *scsi_device_lookup_by_target(struct scsi_target *starget,
index 213123b0486b74a985492715b1c79dde0321682f..41a21772df1289525ad8859608d1d54c73461e76 100644
@@ -887,7 +887,7 @@ static int resp_start_stop(struct scsi_cmnd * scp,
 static sector_t get_sdebug_capacity(void)
 {
        if (scsi_debug_virtual_gb > 0)
-               return 2048 * 1024 * scsi_debug_virtual_gb;
+               return 2048 * 1024 * (sector_t)scsi_debug_virtual_gb;
        else
                return sdebug_store_sectors;
 }
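The (sector_t) cast matters because the product was previously evaluated entirely in int: at scsi_debug_virtual_gb = 2048 (a 2 TB virtual disk) it reaches exactly 2^32 and the reported capacity wraps to zero. Shown in unsigned arithmetic to demonstrate the wrap without signed-overflow undefined behavior:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t vgb = 2048;			/* 2 TB virtual disk */
	uint32_t bad = 2048u * 1024u * vgb;	/* wraps to 0 in 32 bits */
	uint64_t good = 2048 * 1024 * (uint64_t)vgb;

	printf("bad=%u good=%llu\n", bad, (unsigned long long)good);
	return 0;	/* prints: bad=0 good=4294967296 */
}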
index 0c2c73be197469289e17defb2bebb28febc4814e..a1689353d7fd715cf91776e9b290381795876477 100644
@@ -641,9 +641,9 @@ EXPORT_SYMBOL(scsi_eh_prep_cmnd);
 /**
 * scsi_eh_restore_cmnd  - Restore a scsi command info as part of error recovery
  * @scmd:       SCSI command structure to restore
- * @ses:        saved information from a coresponding call to scsi_prep_eh_cmnd
+ * @ses:        saved information from a corresponding call to scsi_eh_prep_cmnd
  *
- * Undo any damage done by above scsi_prep_eh_cmnd().
+ * Undo any damage done by above scsi_eh_prep_cmnd().
  */
 void scsi_eh_restore_cmnd(struct scsi_cmnd* scmd, struct scsi_eh_save *ses)
 {
@@ -1451,28 +1451,21 @@ static void eh_lock_door_done(struct request *req, int uptodate)
  * @sdev:      SCSI device to prevent medium removal
  *
  * Locking:
- *     We must be called from process context; scsi_allocate_request()
- *     may sleep.
+ *     We must be called from process context.
  *
  * Notes:
  *     We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
 *     head of the device's request queue, and continue.
- *
- * Bugs:
- *     scsi_allocate_request() may sleep waiting for existing requests to
- *     be processed.  However, since we haven't kicked off any request
- *     processing for this host, this may deadlock.
- *
- *     If scsi_allocate_request() fails for what ever reason, we
- *     completely forget to lock the door.
  */
 static void scsi_eh_lock_door(struct scsi_device *sdev)
 {
        struct request *req;
 
+       /*
+        * blk_get_request with GFP_KERNEL (__GFP_WAIT) sleeps until a
+        * request becomes available
+        */
        req = blk_get_request(sdev->request_queue, READ, GFP_KERNEL);
-       if (!req)
-               return;
 
        req->cmd[0] = ALLOW_MEDIUM_REMOVAL;
        req->cmd[1] = 0;
index dd3f9d2b99fd05b7834e0abbb7e2cbe23e12d462..30f3275e119ed57473f1fd4e91270d38538eb025 100644
@@ -2412,20 +2412,18 @@ int
 scsi_internal_device_unblock(struct scsi_device *sdev)
 {
        struct request_queue *q = sdev->request_queue; 
-       int err;
        unsigned long flags;
        
        /* 
         * Try to transition the scsi device to SDEV_RUNNING
         * and goose the device queue if successful.  
         */
-       err = scsi_device_set_state(sdev, SDEV_RUNNING);
-       if (err) {
-               err = scsi_device_set_state(sdev, SDEV_CREATED);
-
-               if (err)
-                       return err;
-       }
+       if (sdev->sdev_state == SDEV_BLOCK)
+               sdev->sdev_state = SDEV_RUNNING;
+       else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
+               sdev->sdev_state = SDEV_CREATED;
+       else
+               return -EINVAL;
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_start_queue(q);
index e2b50d8f57a86c686b932e2e6ae7db35001ff279..c44783801402083b7dbd610df35f8990eb090ef9 100644
@@ -115,12 +115,12 @@ MODULE_PARM_DESC(max_report_luns,
                 "REPORT LUNS maximum number of LUNS received (should be"
                 " between 1 and 16384)");
 
-static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ+3;
+static unsigned int scsi_inq_timeout = SCSI_TIMEOUT/HZ + 18;
 
 module_param_named(inq_timeout, scsi_inq_timeout, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(inq_timeout, 
                 "Timeout (in seconds) waiting for devices to answer INQUIRY."
-                " Default is 5. Some non-compliant devices need more.");
+                " Default is 20. Some devices may need more; most need less.");
 
 /* This lock protects only this list */
 static DEFINE_SPINLOCK(async_scan_lock);
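Both defaults follow from SCSI_TIMEOUT, which is assumed here to be 2*HZ as in scsi_priv.h, so the old /HZ + 3 gave the 5 seconds the previous text claimed and the new /HZ + 18 gives 20:

#include <stdio.h>

#define HZ		1000		/* any HZ works; the /HZ cancels it */
#define SCSI_TIMEOUT	(2 * HZ)	/* assumed from scsi_priv.h */

int main(void)
{
	printf("old=%d new=%d\n", SCSI_TIMEOUT/HZ + 3, SCSI_TIMEOUT/HZ + 18);
	return 0;	/* prints: old=5 new=20 (seconds) */
}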
index 0a2ce7b6325cdbf13d5aceda17add03898567f24..f3e664628d7ae273d6e97c2f794d990b7ea9dcfd 100644
@@ -37,7 +37,6 @@
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
 struct iscsi_internal {
-       int daemon_pid;
        struct scsi_transport_template t;
        struct iscsi_transport *iscsi_transport;
        struct list_head list;
@@ -938,23 +937,9 @@ iscsi_if_transport_lookup(struct iscsi_transport *tt)
 }
 
 static int
-iscsi_broadcast_skb(struct sk_buff *skb, gfp_t gfp)
+iscsi_multicast_skb(struct sk_buff *skb, uint32_t group, gfp_t gfp)
 {
-       return netlink_broadcast(nls, skb, 0, 1, gfp);
-}
-
-static int
-iscsi_unicast_skb(struct sk_buff *skb, int pid)
-{
-       int rc;
-
-       rc = netlink_unicast(nls, skb, pid, MSG_DONTWAIT);
-       if (rc < 0) {
-               printk(KERN_ERR "iscsi: can not unicast skb (%d)\n", rc);
-               return rc;
-       }
-
-       return 0;
+       return nlmsg_multicast(nls, skb, 0, group, gfp);
 }
 
 int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
@@ -980,7 +965,7 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
                return -ENOMEM;
        }
 
-       nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+       nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
        ev = NLMSG_DATA(nlh);
        memset(ev, 0, sizeof(*ev));
        ev->transport_handle = iscsi_handle(conn->transport);
@@ -991,10 +976,45 @@ int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
        memcpy(pdu, hdr, sizeof(struct iscsi_hdr));
        memcpy(pdu + sizeof(struct iscsi_hdr), data, data_size);
 
-       return iscsi_unicast_skb(skb, priv->daemon_pid);
+       return iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
 }
 EXPORT_SYMBOL_GPL(iscsi_recv_pdu);
 
+int iscsi_offload_mesg(struct Scsi_Host *shost,
+                      struct iscsi_transport *transport, uint32_t type,
+                      char *data, uint16_t data_size)
+{
+       struct nlmsghdr *nlh;
+       struct sk_buff *skb;
+       struct iscsi_uevent *ev;
+       int len = NLMSG_SPACE(sizeof(*ev) + data_size);
+
+       skb = alloc_skb(len, GFP_NOIO);
+       if (!skb) {
+               printk(KERN_ERR "can not deliver iscsi offload message:OOM\n");
+               return -ENOMEM;
+       }
+
+       nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
+       ev = NLMSG_DATA(nlh);
+       memset(ev, 0, sizeof(*ev));
+       ev->type = type;
+       ev->transport_handle = iscsi_handle(transport);
+       switch (type) {
+       case ISCSI_KEVENT_PATH_REQ:
+               ev->r.req_path.host_no = shost->host_no;
+               break;
+       case ISCSI_KEVENT_IF_DOWN:
+               ev->r.notify_if_down.host_no = shost->host_no;
+               break;
+       }
+
+       memcpy((char *)ev + sizeof(*ev), data, data_size);
+
+       return iscsi_multicast_skb(skb, ISCSI_NL_GRP_UIP, GFP_NOIO);
+}
+EXPORT_SYMBOL_GPL(iscsi_offload_mesg);
+
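Dropping daemon_pid works because multicast moves discovery to the receivers: a listener binds to the netlink group and the kernel no longer needs any pid. A hedged userspace sketch of the subscribing side; NETLINK_ISCSI = 8 and ISCSI_NL_GRP_ISCSID = 1 are assumptions taken from linux/netlink.h and iscsi_if.h:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef NETLINK_ISCSI
#define NETLINK_ISCSI		8	/* assumed; see linux/netlink.h */
#endif
#ifndef ISCSI_NL_GRP_ISCSID
#define ISCSI_NL_GRP_ISCSID	1	/* assumed; see scsi/iscsi_if.h */
#endif

int main(void)
{
	struct sockaddr_nl src;
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ISCSI);

	if (fd < 0)
		return 1;
	memset(&src, 0, sizeof(src));
	src.nl_family = AF_NETLINK;
	src.nl_groups = 1 << (ISCSI_NL_GRP_ISCSID - 1);	/* join group 1 */
	if (bind(fd, (struct sockaddr *)&src, sizeof(src)) < 0)
		return 1;
	/* recvmsg() on fd now sees everything iscsi_multicast_skb() sends */
	close(fd);
	return 0;
}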
 void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 {
        struct nlmsghdr *nlh;
@@ -1014,7 +1034,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
                return;
        }
 
-       nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+       nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
        ev = NLMSG_DATA(nlh);
        ev->transport_handle = iscsi_handle(conn->transport);
        ev->type = ISCSI_KEVENT_CONN_ERROR;
@@ -1022,7 +1042,7 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
        ev->r.connerror.cid = conn->cid;
        ev->r.connerror.sid = iscsi_conn_get_sid(conn);
 
-       iscsi_broadcast_skb(skb, GFP_ATOMIC);
+       iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_ATOMIC);
 
        iscsi_cls_conn_printk(KERN_INFO, conn, "detected conn error (%d)\n",
                              error);
@@ -1030,8 +1050,8 @@ void iscsi_conn_error_event(struct iscsi_cls_conn *conn, enum iscsi_err error)
 EXPORT_SYMBOL_GPL(iscsi_conn_error_event);
 
 static int
-iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
-                     void *payload, int size)
+iscsi_if_send_reply(uint32_t group, int seq, int type, int done, int multi,
+                   void *payload, int size)
 {
        struct sk_buff  *skb;
        struct nlmsghdr *nlh;
@@ -1045,10 +1065,10 @@ iscsi_if_send_reply(int pid, int seq, int type, int done, int multi,
                return -ENOMEM;
        }
 
-       nlh = __nlmsg_put(skb, pid, seq, t, (len - sizeof(*nlh)), 0);
+       nlh = __nlmsg_put(skb, 0, 0, t, (len - sizeof(*nlh)), 0);
        nlh->nlmsg_flags = flags;
        memcpy(NLMSG_DATA(nlh), payload, size);
-       return iscsi_unicast_skb(skb, pid);
+       return iscsi_multicast_skb(skb, group, GFP_ATOMIC);
 }
 
 static int
@@ -1085,7 +1105,7 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
                        return -ENOMEM;
                }
 
-               nlhstat = __nlmsg_put(skbstat, priv->daemon_pid, 0, 0,
+               nlhstat = __nlmsg_put(skbstat, 0, 0, 0,
                                      (len - sizeof(*nlhstat)), 0);
                evstat = NLMSG_DATA(nlhstat);
                memset(evstat, 0, sizeof(*evstat));
@@ -1109,7 +1129,8 @@ iscsi_if_get_stats(struct iscsi_transport *transport, struct nlmsghdr *nlh)
                skb_trim(skbstat, NLMSG_ALIGN(actual_size));
                nlhstat->nlmsg_len = actual_size;
 
-               err = iscsi_unicast_skb(skbstat, priv->daemon_pid);
+               err = iscsi_multicast_skb(skbstat, ISCSI_NL_GRP_ISCSID,
+                                         GFP_ATOMIC);
        } while (err < 0 && err != -ECONNREFUSED);
 
        return err;
@@ -1143,7 +1164,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
                return -ENOMEM;
        }
 
-       nlh = __nlmsg_put(skb, priv->daemon_pid, 0, 0, (len - sizeof(*nlh)), 0);
+       nlh = __nlmsg_put(skb, 0, 0, 0, (len - sizeof(*nlh)), 0);
        ev = NLMSG_DATA(nlh);
        ev->transport_handle = iscsi_handle(session->transport);
 
@@ -1172,7 +1193,7 @@ int iscsi_session_event(struct iscsi_cls_session *session,
         * this will occur if the daemon is not up, so we just warn
         * the user; when the daemon is restarted it will handle it
         */
-       rc = iscsi_broadcast_skb(skb, GFP_KERNEL);
+       rc = iscsi_multicast_skb(skb, ISCSI_NL_GRP_ISCSID, GFP_KERNEL);
        if (rc == -ESRCH)
                iscsi_cls_session_printk(KERN_ERR, session,
                                         "Cannot notify userspace of session "
@@ -1268,26 +1289,54 @@ iscsi_set_param(struct iscsi_transport *transport, struct iscsi_uevent *ev)
        return err;
 }
 
+static int iscsi_if_ep_connect(struct iscsi_transport *transport,
+                              struct iscsi_uevent *ev, int msg_type)
+{
+       struct iscsi_endpoint *ep;
+       struct sockaddr *dst_addr;
+       struct Scsi_Host *shost = NULL;
+       int non_blocking, err = 0;
+
+       if (!transport->ep_connect)
+               return -EINVAL;
+
+       if (msg_type == ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST) {
+               shost = scsi_host_lookup(ev->u.ep_connect_through_host.host_no);
+               if (!shost) {
+                       printk(KERN_ERR "ep connect failed. Could not find "
+                              "host no %u\n",
+                              ev->u.ep_connect_through_host.host_no);
+                       return -ENODEV;
+               }
+               non_blocking = ev->u.ep_connect_through_host.non_blocking;
+       } else
+               non_blocking = ev->u.ep_connect.non_blocking;
+
+       dst_addr = (struct sockaddr *)((char *)ev + sizeof(*ev));
+       ep = transport->ep_connect(shost, dst_addr, non_blocking);
+       if (IS_ERR(ep)) {
+               err = PTR_ERR(ep);
+               goto release_host;
+       }
+
+       ev->r.ep_connect_ret.handle = ep->id;
+release_host:
+       if (shost)
+               scsi_host_put(shost);
+       return err;
+}
+
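With ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST, ep_connect callbacks now receive the Scsi_Host the connection should be pinned to (NULL on the legacy path). A hedged sketch of a callback under the new convention; struct example_ep and the driver around it are hypothetical:

struct example_ep {
	struct sockaddr_storage dst;
};

static struct iscsi_endpoint *
example_ep_connect(struct Scsi_Host *shost, struct sockaddr *dst_addr,
		   int non_blocking)
{
	struct iscsi_endpoint *ep;
	struct example_ep *eep;

	if (!shost)	/* this driver can only connect through a host */
		return ERR_PTR(-EINVAL);

	ep = iscsi_create_endpoint(sizeof(*eep));
	if (!ep)
		return ERR_PTR(-ENOMEM);

	eep = ep->dd_data;
	/* IPv4-sized copy for brevity */
	memcpy(&eep->dst, dst_addr, sizeof(struct sockaddr));
	return ep;
}
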
 static int
 iscsi_if_transport_ep(struct iscsi_transport *transport,
                      struct iscsi_uevent *ev, int msg_type)
 {
        struct iscsi_endpoint *ep;
-       struct sockaddr *dst_addr;
        int rc = 0;
 
        switch (msg_type) {
+       case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
        case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
-               if (!transport->ep_connect)
-                       return -EINVAL;
-
-               dst_addr = (struct sockaddr *)((char*)ev + sizeof(*ev));
-               ep = transport->ep_connect(dst_addr,
-                                          ev->u.ep_connect.non_blocking);
-               if (IS_ERR(ep))
-                       return PTR_ERR(ep);
-
-               ev->r.ep_connect_ret.handle = ep->id;
+               rc = iscsi_if_ep_connect(transport, ev, msg_type);
                break;
        case ISCSI_UEVENT_TRANSPORT_EP_POLL:
                if (!transport->ep_poll)
@@ -1365,7 +1414,31 @@ iscsi_set_host_param(struct iscsi_transport *transport,
 }
 
 static int
-iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
+iscsi_set_path(struct iscsi_transport *transport, struct iscsi_uevent *ev)
+{
+       struct Scsi_Host *shost;
+       struct iscsi_path *params;
+       int err;
+
+       if (!transport->set_path)
+               return -ENOSYS;
+
+       shost = scsi_host_lookup(ev->u.set_path.host_no);
+       if (!shost) {
+               printk(KERN_ERR "set path could not find host no %u\n",
+                      ev->u.set_path.host_no);
+               return -ENODEV;
+       }
+
+       params = (struct iscsi_path *)((char *)ev + sizeof(*ev));
+       err = transport->set_path(shost, params);
+
+       scsi_host_put(shost);
+       return err;
+}
+
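A hedged sketch of the matching LLD side of ISCSI_UEVENT_PATH_UPDATE, validation only, since what a driver does with the path is hardware-specific:

static int example_set_path(struct Scsi_Host *shost,
			    struct iscsi_path *params)
{
	/* userspace advertises the address family via ip_addr_len */
	if (params->ip_addr_len != 4 && params->ip_addr_len != 16)
		return -EINVAL;

	/* push params->mac_addr, src/dst addresses, vlan_id and pmtu
	 * down to the offload firmware here */
	return 0;
}
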
+static int
+iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, uint32_t *group)
 {
        int err = 0;
        struct iscsi_uevent *ev = NLMSG_DATA(nlh);
@@ -1375,6 +1448,11 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        struct iscsi_cls_conn *conn;
        struct iscsi_endpoint *ep = NULL;
 
+       if (nlh->nlmsg_type == ISCSI_UEVENT_PATH_UPDATE)
+               *group = ISCSI_NL_GRP_UIP;
+       else
+               *group = ISCSI_NL_GRP_ISCSID;
+
        priv = iscsi_if_transport_lookup(iscsi_ptr(ev->transport_handle));
        if (!priv)
                return -EINVAL;
@@ -1383,8 +1461,6 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        if (!try_module_get(transport->owner))
                return -EINVAL;
 
-       priv->daemon_pid = NETLINK_CREDS(skb)->pid;
-
        switch (nlh->nlmsg_type) {
        case ISCSI_UEVENT_CREATE_SESSION:
                err = iscsi_if_create_session(priv, ep, ev,
@@ -1469,6 +1545,7 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        case ISCSI_UEVENT_TRANSPORT_EP_CONNECT:
        case ISCSI_UEVENT_TRANSPORT_EP_POLL:
        case ISCSI_UEVENT_TRANSPORT_EP_DISCONNECT:
+       case ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST:
                err = iscsi_if_transport_ep(transport, ev, nlh->nlmsg_type);
                break;
        case ISCSI_UEVENT_TGT_DSCVR:
@@ -1477,6 +1554,9 @@ iscsi_if_recv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
        case ISCSI_UEVENT_SET_HOST_PARAM:
                err = iscsi_set_host_param(transport, ev);
                break;
+       case ISCSI_UEVENT_PATH_UPDATE:
+               err = iscsi_set_path(transport, ev);
+               break;
        default:
                err = -ENOSYS;
                break;
@@ -1499,6 +1579,7 @@ iscsi_if_rx(struct sk_buff *skb)
                uint32_t rlen;
                struct nlmsghdr *nlh;
                struct iscsi_uevent *ev;
+               uint32_t group;
 
                nlh = nlmsg_hdr(skb);
                if (nlh->nlmsg_len < sizeof(*nlh) ||
@@ -1511,7 +1592,7 @@ iscsi_if_rx(struct sk_buff *skb)
                if (rlen > skb->len)
                        rlen = skb->len;
 
-               err = iscsi_if_recv_msg(skb, nlh);
+               err = iscsi_if_recv_msg(skb, nlh, &group);
                if (err) {
                        ev->type = ISCSI_KEVENT_IF_ERROR;
                        ev->iferror = err;
@@ -1525,8 +1606,7 @@ iscsi_if_rx(struct sk_buff *skb)
                         */
                        if (ev->type == ISCSI_UEVENT_GET_STATS && !err)
                                break;
-                       err = iscsi_if_send_reply(
-                               NETLINK_CREDS(skb)->pid, nlh->nlmsg_seq,
+                       err = iscsi_if_send_reply(group, nlh->nlmsg_seq,
                                nlh->nlmsg_type, 0, 0, ev, sizeof(*ev));
                } while (err < 0 && err != -ECONNREFUSED);
                skb_pull(skb, rlen);
@@ -1774,7 +1854,6 @@ iscsi_register_transport(struct iscsi_transport *tt)
        if (!priv)
                return NULL;
        INIT_LIST_HEAD(&priv->list);
-       priv->daemon_pid = -1;
        priv->iscsi_transport = tt;
        priv->t.user_scan = iscsi_user_scan;
        priv->t.create_work_queue = 1;
index bcf3bd40bbd5fc3bfbecb29f0d7b21b5e393c8b3..878b17a9af3008ab5fe29afc7a8faa4a9c99d58d 100644 (file)
@@ -1902,24 +1902,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
        index = sdkp->index;
        dev = &sdp->sdev_gendev;
 
-       if (!sdp->request_queue->rq_timeout) {
-               if (sdp->type != TYPE_MOD)
-                       blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
-               else
-                       blk_queue_rq_timeout(sdp->request_queue,
-                                            SD_MOD_TIMEOUT);
-       }
-
-       device_initialize(&sdkp->dev);
-       sdkp->dev.parent = &sdp->sdev_gendev;
-       sdkp->dev.class = &sd_disk_class;
-       dev_set_name(&sdkp->dev, dev_name(&sdp->sdev_gendev));
-
-       if (device_add(&sdkp->dev))
-               goto out_free_index;
-
-       get_device(&sdp->sdev_gendev);
-
        if (index < SD_MAX_DISKS) {
                gd->major = sd_major((index & 0xf0) >> 4);
                gd->first_minor = ((index & 0xf) << 4) | (index & 0xfff00);
@@ -1954,11 +1936,6 @@ static void sd_probe_async(void *data, async_cookie_t cookie)
 
        sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
                  sdp->removable ? "removable " : "");
-
-       return;
-
- out_free_index:
-       ida_remove(&sd_index_ida, index);
 }
 
 /**
@@ -2026,6 +2003,24 @@ static int sd_probe(struct device *dev)
        sdkp->openers = 0;
        sdkp->previous_state = 1;
 
+       if (!sdp->request_queue->rq_timeout) {
+               if (sdp->type != TYPE_MOD)
+                       blk_queue_rq_timeout(sdp->request_queue, SD_TIMEOUT);
+               else
+                       blk_queue_rq_timeout(sdp->request_queue,
+                                            SD_MOD_TIMEOUT);
+       }
+
+       device_initialize(&sdkp->dev);
+       sdkp->dev.parent = &sdp->sdev_gendev;
+       sdkp->dev.class = &sd_disk_class;
+       dev_set_name(&sdkp->dev, "%s", dev_name(&sdp->sdev_gendev));
+
+       if (device_add(&sdkp->dev))
+               goto out_free_index;
+
+       get_device(&sdp->sdev_gendev);
+
        async_schedule(sd_probe_async, sdkp);
 
        return 0;
@@ -2055,8 +2050,10 @@ static int sd_probe(struct device *dev)
  **/
 static int sd_remove(struct device *dev)
 {
-       struct scsi_disk *sdkp = dev_get_drvdata(dev);
+       struct scsi_disk *sdkp;
 
+       async_synchronize_full();
+       sdkp = dev_get_drvdata(dev);
        device_del(&sdkp->dev);
        del_gendisk(sdkp->disk);
        sd_shutdown(dev);
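
The reshuffle above moves queue-timeout setup and device registration back onto the synchronous probe path and, in sd_remove(), drains the async work before teardown. A hedged sketch of the underlying pattern, with hypothetical driver names:

#include <linux/async.h>
#include <linux/device.h>

static void my_probe_async(void *data, async_cookie_t cookie)
{
	/* slow discovery work runs here, off the probe path */
}

static int my_probe(struct device *dev)
{
	/* fast, failure-prone setup stays synchronous ... */
	async_schedule(my_probe_async, dev);
	return 0;
}

static int my_remove(struct device *dev)
{
	async_synchronize_full();	/* my_probe_async() may still run */
	/* now safe to tear down what the async half touches */
	return 0;
}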
index 89bd438e1fe30692e006a0bac18c18fc4c21cd86..b33d04250bbc3badeeb3f00644f137402af07bf1 100644 (file)
@@ -2964,7 +2964,7 @@ static int st_int_ioctl(struct scsi_tape *STp, unsigned int cmd_in, unsigned lon
                            !(STp->use_pf & PF_TESTED)) {
                                /* Try the other possible state of Page Format if not
                                   already tried */
-                               STp->use_pf = !STp->use_pf | PF_TESTED;
+                               STp->use_pf = (STp->use_pf ^ USE_PF) | PF_TESTED;
                                st_release_request(SRpnt);
                                SRpnt = NULL;
                                return st_int_ioctl(STp, cmd_in, arg);
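
The one-line st fix deserves a note: "!STp->use_pf" collapses the whole flag word to 0 or 1, so once PF_TESTED was set the USE_PF bit could never come back; XOR flips only the USE_PF bit and preserves PF_TESTED. A standalone demonstration, assuming st.c's flag values USE_PF=1 and PF_TESTED=2:

#include <stdio.h>

#define USE_PF    1	/* assumed from st.c, for illustration */
#define PF_TESTED 2

int main(void)
{
	int buggy = USE_PF, fixed = USE_PF;

	buggy = !buggy | PF_TESTED;		/* !1|2 = 2 */
	buggy = !buggy | PF_TESTED;		/* !2|2 = 2: stuck */

	fixed = (fixed ^ USE_PF) | PF_TESTED;	/* 2 */
	fixed = (fixed ^ USE_PF) | PF_TESTED;	/* 3: bit toggles back */

	printf("buggy: %d (USE_PF lost), fixed: %d (USE_PF restored)\n",
	       buggy, fixed);
	return 0;
}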
index 583966ec82661f9959fe2434c853d50e2139d771..45374d66d26a4268ae9a765d45687c8a0c02c29a 100644 (file)
@@ -737,11 +737,14 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
        struct sym_hcb *np = sym_get_hcb(sdev->host);
        struct sym_tcb *tp = &np->target[sdev->id];
        struct sym_lcb *lp;
+       unsigned long flags;
+       int error;
 
        if (sdev->id >= SYM_CONF_MAX_TARGET || sdev->lun >= SYM_CONF_MAX_LUN)
                return -ENXIO;
 
-       tp->starget = sdev->sdev_target;
+       spin_lock_irqsave(np->s.host->host_lock, flags);
+
        /*
         * Fail the device init if the device is flagged NOSCAN at BOOT in
         * the NVRAM.  This may speed up boot and maintain coherency with
@@ -753,26 +756,37 @@ static int sym53c8xx_slave_alloc(struct scsi_device *sdev)
 
        if (tp->usrflags & SYM_SCAN_BOOT_DISABLED) {
                tp->usrflags &= ~SYM_SCAN_BOOT_DISABLED;
-               starget_printk(KERN_INFO, tp->starget,
+               starget_printk(KERN_INFO, sdev->sdev_target,
                                "Scan at boot disabled in NVRAM\n");
-               return -ENXIO;
+               error = -ENXIO;
+               goto out;
        }
 
        if (tp->usrflags & SYM_SCAN_LUNS_DISABLED) {
-               if (sdev->lun != 0)
-                       return -ENXIO;
-               starget_printk(KERN_INFO, tp->starget,
+               if (sdev->lun != 0) {
+                       error = -ENXIO;
+                       goto out;
+               }
+               starget_printk(KERN_INFO, sdev->sdev_target,
                                "Multiple LUNs disabled in NVRAM\n");
        }
 
        lp = sym_alloc_lcb(np, sdev->id, sdev->lun);
-       if (!lp)
-               return -ENOMEM;
+       if (!lp) {
+               error = -ENOMEM;
+               goto out;
+       }
+       if (tp->nlcb == 1)
+               tp->starget = sdev->sdev_target;
 
        spi_min_period(tp->starget) = tp->usr_period;
        spi_max_width(tp->starget) = tp->usr_width;
 
-       return 0;
+       error = 0;
+out:
+       spin_unlock_irqrestore(np->s.host->host_lock, flags);
+
+       return error;
 }
 
 /*
@@ -819,12 +833,34 @@ static int sym53c8xx_slave_configure(struct scsi_device *sdev)
 static void sym53c8xx_slave_destroy(struct scsi_device *sdev)
 {
        struct sym_hcb *np = sym_get_hcb(sdev->host);
-       struct sym_lcb *lp = sym_lp(&np->target[sdev->id], sdev->lun);
+       struct sym_tcb *tp = &np->target[sdev->id];
+       struct sym_lcb *lp = sym_lp(tp, sdev->lun);
+       unsigned long flags;
+
+       spin_lock_irqsave(np->s.host->host_lock, flags);
+
+       if (lp->busy_itlq || lp->busy_itl) {
+               /*
+                * This really shouldn't happen, but we can't return an error
+                * so let's try to stop all on-going I/O.
+                */
+               starget_printk(KERN_WARNING, tp->starget,
+                              "Removing busy LCB (%d)\n", sdev->lun);
+               sym_reset_scsi_bus(np, 1);
+       }
 
-       if (lp->itlq_tbl)
-               sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL");
-       kfree(lp->cb_tags);
-       sym_mfree_dma(lp, sizeof(*lp), "LCB");
+       if (sym_free_lcb(np, sdev->id, sdev->lun) == 0) {
+               /*
+                * It was the last unit for this target.
+                */
+               tp->head.sval        = 0;
+               tp->head.wval        = np->rv_scntl3;
+               tp->head.uval        = 0;
+               tp->tgoal.check_nego = 1;
+               tp->starget          = NULL;
+       }
+
+       spin_unlock_irqrestore(np->s.host->host_lock, flags);
 }
 
 /*
@@ -890,6 +926,8 @@ static void sym_exec_user_command (struct sym_hcb *np, struct sym_usrcmd *uc)
                        if (!((uc->target >> t) & 1))
                                continue;
                        tp = &np->target[t];
+                       if (!tp->nlcb)
+                               continue;
 
                        switch (uc->cmd) {
 
index ffa70d1ed182dcfee4e1a2fc4593db436c16ced7..69ad4945c9369467f0d2fa5095741845d8156ac8 100644 (file)
@@ -1896,6 +1896,15 @@ void sym_start_up(struct Scsi_Host *shost, int reason)
                tp->head.sval = 0;
                tp->head.wval = np->rv_scntl3;
                tp->head.uval = 0;
+               if (tp->lun0p)
+                       tp->lun0p->to_clear = 0;
+               if (tp->lunmp) {
+                       int ln;
+
+                       for (ln = 1; ln < SYM_CONF_MAX_LUN; ln++)
+                               if (tp->lunmp[ln])
+                                       tp->lunmp[ln]->to_clear = 0;
+               }
        }
 
        /*
@@ -4988,7 +4997,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
         */
        if (ln && !tp->lunmp) {
                tp->lunmp = kcalloc(SYM_CONF_MAX_LUN, sizeof(struct sym_lcb *),
-                               GFP_KERNEL);
+                               GFP_ATOMIC);
                if (!tp->lunmp)
                        goto fail;
        }
@@ -5008,6 +5017,7 @@ struct sym_lcb *sym_alloc_lcb (struct sym_hcb *np, u_char tn, u_char ln)
                tp->lun0p = lp;
                tp->head.lun0_sa = cpu_to_scr(vtobus(lp));
        }
+       tp->nlcb++;
 
        /*
         *  Let the itl task point to error handling.
@@ -5084,6 +5094,43 @@ fail:
        return;
 }
 
+/*
+ *  LUN control block deallocation. Returns the number of valid remaining LCBs
+ *  for the target.
+ */
+int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln)
+{
+       struct sym_tcb *tp = &np->target[tn];
+       struct sym_lcb *lp = sym_lp(tp, ln);
+
+       tp->nlcb--;
+
+       if (ln) {
+               if (!tp->nlcb) {
+                       kfree(tp->lunmp);
+                       sym_mfree_dma(tp->luntbl, 256, "LUNTBL");
+                       tp->lunmp = NULL;
+                       tp->luntbl = NULL;
+                       tp->head.luntbl_sa = cpu_to_scr(vtobus(np->badluntbl));
+               } else {
+                       tp->luntbl[ln] = cpu_to_scr(vtobus(&np->badlun_sa));
+                       tp->lunmp[ln] = NULL;
+               }
+       } else {
+               tp->lun0p = NULL;
+               tp->head.lun0_sa = cpu_to_scr(vtobus(&np->badlun_sa));
+       }
+
+       if (lp->itlq_tbl) {
+               sym_mfree_dma(lp->itlq_tbl, SYM_CONF_MAX_TASK * 4, "ITLQ_TBL");
+               kfree(lp->cb_tags);
+       }
+
+       sym_mfree_dma(lp, sizeof(*lp), "LCB");
+
+       return tp->nlcb;
+}
+
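sym_alloc_lcb() now bumps tp->nlcb and sym_free_lcb() returns the count left, so allocation and free must pair up per LUN, and a zero return is the cue to reset per-target state. A hedged fragment of the intended pairing (sym53c8xx_slave_destroy() above shows the real one inline):

static void example_destroy_lun(struct sym_hcb *np, struct sym_tcb *tp,
				u_char tn, u_char ln)
{
	if (sym_free_lcb(np, tn, ln) == 0) {
		/* last LCB gone: forget the starget and force a fresh
		 * negotiation next time, as slave_destroy does above */
		tp->tgoal.check_nego = 1;
		tp->starget = NULL;
	}
}
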
 /*
  *  Queue a SCSI IO to the controller.
  */
index 9ebc8706b6bfc7b70fcdd884a5328130d53e65d4..053e63c86822d7c30a0d25edc0eac28a80eb5eec 100644 (file)
@@ -401,6 +401,7 @@ struct sym_tcb {
         *  An array of bus addresses is used on reselection.
         */
        u32     *luntbl;        /* LCBs bus address table       */
+       int     nlcb;           /* Number of valid LCBs (including LUN #0) */
 
        /*
         *  LUN table used by the C code.
@@ -1065,6 +1066,7 @@ int sym_clear_tasks(struct sym_hcb *np, int cam_status, int target, int lun, int
 struct sym_ccb *sym_get_ccb(struct sym_hcb *np, struct scsi_cmnd *cmd, u_char tag_order);
 void sym_free_ccb(struct sym_hcb *np, struct sym_ccb *cp);
 struct sym_lcb *sym_alloc_lcb(struct sym_hcb *np, u_char tn, u_char ln);
+int sym_free_lcb(struct sym_hcb *np, u_char tn, u_char ln);
 int sym_queue_scsiio(struct sym_hcb *np, struct scsi_cmnd *csio, struct sym_ccb *cp);
 int sym_abort_scsiio(struct sym_hcb *np, struct scsi_cmnd *ccb, int timed_out);
 int sym_reset_scsi_target(struct sym_hcb *np, int target);
index b1512c4bb8c71d076dd5d27e1b7f08cda00c2e75..24667eedc02329554735f72420cbc04d99faa871 100644 (file)
@@ -175,10 +175,4 @@ int exofs_async_op(struct osd_request *or,
 
 int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr);
 
-int osd_req_read_kern(struct osd_request *or,
-       const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
-
-int osd_req_write_kern(struct osd_request *or,
-       const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
-
 #endif /*ifndef __EXOFS_COM_H__*/
index ba8d9fab46932b26394331bb4d93ab60ba2736e5..77d0a295eb1cd3e95443da014fc9801cf64f5aff 100644 (file)
@@ -59,10 +59,9 @@ static void _pcol_init(struct page_collect *pcol, unsigned expected_pages,
                struct inode *inode)
 {
        struct exofs_sb_info *sbi = inode->i_sb->s_fs_info;
-       struct request_queue *req_q = sbi->s_dev->scsi_device->request_queue;
 
        pcol->sbi = sbi;
-       pcol->req_q = req_q;
+       pcol->req_q = osd_request_queue(sbi->s_dev);
        pcol->inode = inode;
        pcol->expected_pages = expected_pages;
 
@@ -266,7 +265,7 @@ static int read_exec(struct page_collect *pcol, bool is_sync)
                goto err;
        }
 
-       osd_req_read(or, &obj, pcol->bio, i_start);
+       osd_req_read(or, &obj, i_start, pcol->bio, pcol->length);
 
        if (is_sync) {
                exofs_sync_op(or, pcol->sbi->s_timeout, oi->i_cred);
@@ -522,7 +521,8 @@ static int write_exec(struct page_collect *pcol)
 
        *pcol_copy = *pcol;
 
-       osd_req_write(or, &obj, pcol_copy->bio, i_start);
+       pcol_copy->bio->bi_rw |= (1 << BIO_RW); /* FIXME: bio_set_dir() */
+       osd_req_write(or, &obj, i_start, pcol_copy->bio, pcol_copy->length);
        ret = exofs_async_op(or, writepages_done, pcol_copy, oi->i_cred);
        if (unlikely(ret)) {
                EXOFS_ERR("write_exec: exofs_async_op() Faild\n");
index 06ca92672eb5d6118ee644074019a650a96125f3..b3d2ccb87aaa8981374e824066bfd2aa608fa86a 100644 (file)
@@ -125,29 +125,3 @@ int extract_attr_from_req(struct osd_request *or, struct osd_attr *attr)
 
        return -EIO;
 }
-
-int osd_req_read_kern(struct osd_request *or,
-       const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
-{
-       struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
-       struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
-
-       if (!bio)
-               return -ENOMEM;
-
-       osd_req_read(or, obj, bio, offset);
-       return 0;
-}
-
-int osd_req_write_kern(struct osd_request *or,
-       const struct osd_obj_id *obj, u64 offset, void* buff, u64 len)
-{
-       struct request_queue *req_q = or->osd_dev->scsi_device->request_queue;
-       struct bio *bio = bio_map_kern(req_q, buff, len, GFP_KERNEL);
-
-       if (!bio)
-               return -ENOMEM;
-
-       osd_req_write(or, obj, bio, offset);
-       return 0;
-}
index cfe4fe1b7132016c4a30f480895a27812c09c959..60e8934d10b5e070ab2289a19c787935026722b1 100644 (file)
@@ -79,6 +79,7 @@
 #define ETH_P_AOE      0x88A2          /* ATA over Ethernet            */
 #define ETH_P_TIPC     0x88CA          /* TIPC                         */
 #define ETH_P_FCOE     0x8906          /* Fibre Channel over Ethernet  */
+#define ETH_P_FIP      0x8914          /* FCoE Initialization Protocol */
 #define ETH_P_EDSA     0xDADA          /* Ethertype DSA [ NOT AN OFFICIALLY REGISTERED ID ] */
 
 /*
index 0627a9ae6347c206f7388dd9dcd8959e52ae55c6..3d138c1fcf8abb14fe2f024c4760448e4a965f4c 100644 (file)
  * http://www.t11.org/ftp/t11/pub/fc/bb-5/08-543v1.pdf
  */
 
-/*
- * The FIP ethertype eventually goes in net/if_ether.h.
- */
-#ifndef ETH_P_FIP
-#define ETH_P_FIP      0x8914  /* FIP Ethertype */
-#endif
-
 #define FIP_DEF_PRI    128     /* default selection priority */
 #define FIP_DEF_FC_MAP 0x0efc00 /* default FCoE MAP (MAC OUI) value */
 #define FIP_DEF_FKA    8000    /* default FCF keep-alive/advert period (mS) */
index d0ed5226f8c4f02db9ad40df1c8455d83c053f92..4426f00da5ffcbf1ca9f92409bbb860479e60a18 100644 (file)
 #define ISCSI_IF_H
 
 #include <scsi/iscsi_proto.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+
+#define ISCSI_NL_GRP_ISCSID    1
+#define ISCSI_NL_GRP_UIP       2
 
 #define UEVENT_BASE                    10
 #define KEVENT_BASE                    100
@@ -50,7 +55,10 @@ enum iscsi_uevent_e {
        ISCSI_UEVENT_TGT_DSCVR          = UEVENT_BASE + 15,
        ISCSI_UEVENT_SET_HOST_PARAM     = UEVENT_BASE + 16,
        ISCSI_UEVENT_UNBIND_SESSION     = UEVENT_BASE + 17,
-       ISCSI_UEVENT_CREATE_BOUND_SESSION       = UEVENT_BASE + 18,
+       ISCSI_UEVENT_CREATE_BOUND_SESSION               = UEVENT_BASE + 18,
+       ISCSI_UEVENT_TRANSPORT_EP_CONNECT_THROUGH_HOST  = UEVENT_BASE + 19,
+
+       ISCSI_UEVENT_PATH_UPDATE        = UEVENT_BASE + 20,
 
        /* up events */
        ISCSI_KEVENT_RECV_PDU           = KEVENT_BASE + 1,
@@ -59,6 +67,9 @@ enum iscsi_uevent_e {
        ISCSI_KEVENT_DESTROY_SESSION    = KEVENT_BASE + 4,
        ISCSI_KEVENT_UNBIND_SESSION     = KEVENT_BASE + 5,
        ISCSI_KEVENT_CREATE_SESSION     = KEVENT_BASE + 6,
+
+       ISCSI_KEVENT_PATH_REQ           = KEVENT_BASE + 7,
+       ISCSI_KEVENT_IF_DOWN            = KEVENT_BASE + 8,
 };
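
A hedged sketch of how a userspace listener on ISCSI_NL_GRP_UIP might dispatch the two new kernel events; the handler names are hypothetical, and the iscsi_path block rides after the uevent as described further below:

void uip_handle_event(struct iscsi_uevent *ev)
{
	switch (ev->type) {
	case ISCSI_KEVENT_PATH_REQ:
		handle_path_req(ev->r.req_path.host_no,
				(struct iscsi_path *)(ev + 1));
		break;
	case ISCSI_KEVENT_IF_DOWN:
		handle_if_down(ev->r.notify_if_down.host_no);
		break;
	}
}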
 
 enum iscsi_tgt_dscvr {
@@ -131,6 +142,10 @@ struct iscsi_uevent {
                struct msg_transport_connect {
                        uint32_t        non_blocking;
                } ep_connect;
+               struct msg_transport_connect_through_host {
+                       uint32_t        host_no;
+                       uint32_t        non_blocking;
+               } ep_connect_through_host;
                struct msg_transport_poll {
                        uint64_t        ep_handle;
                        uint32_t        timeout_ms;
@@ -154,6 +169,9 @@ struct iscsi_uevent {
                        uint32_t        param; /* enum iscsi_host_param */
                        uint32_t        len;
                } set_host_param;
+               struct msg_set_path {
+                       uint32_t        host_no;
+               } set_path;
        } u;
        union {
                /* messages k -> u */
@@ -187,9 +205,38 @@ struct iscsi_uevent {
                struct msg_transport_connect_ret {
                        uint64_t        handle;
                } ep_connect_ret;
+               struct msg_req_path {
+                       uint32_t        host_no;
+               } req_path;
+               struct msg_notify_if_down {
+                       uint32_t        host_no;
+               } notify_if_down;
        } r;
 } __attribute__ ((aligned (sizeof(uint64_t))));
 
+/*
+ * To keep the struct iscsi_uevent size the same for userspace code
+ * compatibility, the main structure for ISCSI_UEVENT_PATH_UPDATE and
+ * ISCSI_KEVENT_PATH_REQ is defined separately and comes after the
+ * struct iscsi_uevent in the NETLINK_ISCSI message.
+ */
+struct iscsi_path {
+       uint64_t        handle;
+       uint8_t         mac_addr[6];
+       uint8_t         mac_addr_old[6];
+       uint32_t        ip_addr_len;    /* 4 or 16 */
+       union {
+               struct in_addr  v4_addr;
+               struct in6_addr v6_addr;
+       } src;
+       union {
+               struct in_addr  v4_addr;
+               struct in6_addr v6_addr;
+       } dst;
+       uint16_t        vlan_id;
+       uint16_t        pmtu;
+} __attribute__ ((aligned (sizeof(uint64_t))));
+
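Correspondingly, a hedged sketch of composing the ISCSI_UEVENT_PATH_UPDATE reply in userspace; per the comment above, the iscsi_path block simply follows the uevent in the NETLINK_ISCSI message, and transport_handle and host_no are assumed to have been learned elsewhere (e.g. from sysfs):

	char buf[sizeof(struct iscsi_uevent) + sizeof(struct iscsi_path)];
	struct iscsi_uevent *ev = (struct iscsi_uevent *)buf;
	struct iscsi_path *path = (struct iscsi_path *)(ev + 1);

	memset(buf, 0, sizeof(buf));
	ev->type = ISCSI_UEVENT_PATH_UPDATE;
	ev->transport_handle = transport_handle;
	ev->u.set_path.host_no = host_no;
	path->ip_addr_len = 4;			/* IPv4 */
	/* fill mac_addr, src.v4_addr, dst.v4_addr, vlan_id, pmtu */
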
 /*
  * Common error codes
  */
index 45f9cc642c46572623eaf09568332edcd17f475c..ebdd9f4cf070da669c2b80a817c9b156e51f360e 100644 (file)
@@ -679,6 +679,7 @@ struct fc_lport {
        unsigned int            e_d_tov;
        unsigned int            r_a_tov;
        u8                      max_retry_count;
+       u8                      max_rport_retry_count;
        u16                     link_speed;
        u16                     link_supported_speeds;
        u16                     lro_xid;        /* max xid for fcoe lro */
index 0289f5745fb9a687e92f46625a8bd38fa81407d0..196525cd402f6a86d09d208557f03217dd9bc08a 100644 (file)
@@ -82,9 +82,12 @@ enum {
 
 
 enum {
+       ISCSI_TASK_FREE,
        ISCSI_TASK_COMPLETED,
        ISCSI_TASK_PENDING,
        ISCSI_TASK_RUNNING,
+       ISCSI_TASK_ABRT_TMF,            /* aborted due to TMF */
+       ISCSI_TASK_ABRT_SESS_RECOV,     /* aborted due to session recovery */
 };
 
 struct iscsi_r2t_info {
@@ -181,9 +184,7 @@ struct iscsi_conn {
 
        /* xmit */
        struct list_head        mgmtqueue;      /* mgmt (control) xmit queue */
-       struct list_head        mgmt_run_list;  /* list of control tasks */
-       struct list_head        xmitqueue;      /* data-path cmd queue */
-       struct list_head        run_list;       /* list of cmds in progress */
+       struct list_head        cmdqueue;       /* data-path cmd queue */
        struct list_head        requeue;        /* tasks needing another run */
        struct work_struct      xmitwork;       /* per-conn. xmit workqueue */
        unsigned long           suspend_tx;     /* suspend Tx */
@@ -406,6 +407,7 @@ extern int __iscsi_complete_pdu(struct iscsi_conn *, struct iscsi_hdr *,
                                char *, int);
 extern int iscsi_verify_itt(struct iscsi_conn *, itt_t);
 extern struct iscsi_task *iscsi_itt_to_ctask(struct iscsi_conn *, itt_t);
+extern struct iscsi_task *iscsi_itt_to_task(struct iscsi_conn *, itt_t);
 extern void iscsi_requeue_task(struct iscsi_task *task);
 extern void iscsi_put_task(struct iscsi_task *task);
 extern void __iscsi_get_task(struct iscsi_task *task);
index f888a6fda07302b3df0dffc731e4c04aa0985863..56e920ade3269f4895546c13bc419bad045f2400 100644 (file)
@@ -29,6 +29,7 @@ enum {
        OSD_APAGE_PARTITION_INFORMATION = OSD_APAGE_PARTITION_FIRST + 1,
        OSD_APAGE_PARTITION_QUOTAS      = OSD_APAGE_PARTITION_FIRST + 2,
        OSD_APAGE_PARTITION_TIMESTAMP   = OSD_APAGE_PARTITION_FIRST + 3,
+       OSD_APAGE_PARTITION_ATTR_ACCESS = OSD_APAGE_PARTITION_FIRST + 4,
        OSD_APAGE_PARTITION_SECURITY    = OSD_APAGE_PARTITION_FIRST + 5,
        OSD_APAGE_PARTITION_LAST        = 0x5FFFFFFF,
 
@@ -51,7 +52,9 @@ enum {
        OSD_APAGE_RESERVED_TYPE_LAST    = 0xEFFFFFFF,
 
        OSD_APAGE_COMMON_FIRST          = 0xF0000000,
-       OSD_APAGE_COMMON_LAST           = 0xFFFFFFFE,
+       OSD_APAGE_COMMON_LAST           = 0xFFFFFFFD,
+
+       OSD_APAGE_CURRENT_COMMAND       = 0xFFFFFFFE,
 
        OSD_APAGE_REQUEST_ALL           = 0xFFFFFFFF,
 };
@@ -106,10 +109,30 @@ enum {
        OSD_ATTR_RI_PRODUCT_REVISION_LEVEL   = 0x7,   /* 4        */
        OSD_ATTR_RI_PRODUCT_SERIAL_NUMBER    = 0x8,   /* variable */
        OSD_ATTR_RI_OSD_NAME                 = 0x9,   /* variable */
+       OSD_ATTR_RI_MAX_CDB_CONTINUATION_LEN = 0xA,   /* 4        */
        OSD_ATTR_RI_TOTAL_CAPACITY           = 0x80,  /* 8        */
        OSD_ATTR_RI_USED_CAPACITY            = 0x81,  /* 8        */
        OSD_ATTR_RI_NUMBER_OF_PARTITIONS     = 0xC0,  /* 8        */
        OSD_ATTR_RI_CLOCK                    = 0x100, /* 6        */
+       OARI_DEFAULT_ISOLATION_METHOD        = 0x110, /* 1        */
+       OARI_SUPPORTED_ISOLATION_METHODS     = 0x111, /* 32       */
+
+       OARI_DATA_ATOMICITY_GUARANTEE                   = 0x120,   /* 8       */
+       OARI_DATA_ATOMICITY_ALIGNMENT                   = 0x121,   /* 8       */
+       OARI_ATTRIBUTES_ATOMICITY_GUARANTEE             = 0x122,   /* 8       */
+       OARI_DATA_ATTRIBUTES_ATOMICITY_MULTIPLIER       = 0x123,   /* 1       */
+
+       OARI_MAXIMUM_SNAPSHOTS_COUNT                    = 0x1C1,    /* 0 or 4 */
+       OARI_MAXIMUM_CLONES_COUNT                       = 0x1C2,    /* 0 or 4 */
+       OARI_MAXIMUM_BRANCH_DEPTH                       = 0x1CC,    /* 0 or 4 */
+       OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_FIRST  = 0x200,    /* 0 or 4 */
+       OARI_SUPPORTED_OBJECT_DUPLICATION_METHOD_LAST   = 0x2FF,    /* 0 or 4 */
+       OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_FIRST = 0x300,    /* 0 or 4 */
+       OARI_SUPPORTED_TIME_OF_DUPLICATION_METHOD_LAST  = 0x30F,    /* 0 or 4 */
+       OARI_SUPPORT_FOR_DUPLICATED_OBJECT_FREEZING     = 0x310,    /* 0 or 4 */
+       OARI_SUPPORT_FOR_SNAPSHOT_REFRESHING            = 0x311,    /* 0 or 1 */
+       OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_FIRST = 0x7000001,/* 0 or 4 */
+       OARI_SUPPORTED_CDB_CONTINUATION_DESC_TYPE_LAST  = 0x700FFFF,/* 0 or 4 */
 };
 /* Root_Information_attributes_page does not have a get_page structure */
 
@@ -120,7 +143,15 @@ enum {
        OSD_ATTR_PI_PARTITION_ID            = 0x1,     /* 8        */
        OSD_ATTR_PI_USERNAME                = 0x9,     /* variable */
        OSD_ATTR_PI_USED_CAPACITY           = 0x81,    /* 8        */
+       OSD_ATTR_PI_USED_CAPACITY_INCREMENT = 0x84,    /* 0 or 8   */
        OSD_ATTR_PI_NUMBER_OF_OBJECTS       = 0xC1,    /* 8        */
+
+       OSD_ATTR_PI_ACTUAL_DATA_SPACE                      = 0xD1, /* 0 or 8 */
+       OSD_ATTR_PI_RESERVED_DATA_SPACE                    = 0xD2, /* 0 or 8 */
+       OSD_ATTR_PI_DEFAULT_SNAPSHOT_DUPLICATION_METHOD    = 0x200,/* 0 or 4 */
+       OSD_ATTR_PI_DEFAULT_CLONE_DUPLICATION_METHOD       = 0x201,/* 0 or 4 */
+       OSD_ATTR_PI_DEFAULT_SP_TIME_OF_DUPLICATION         = 0x300,/* 0 or 4 */
+       OSD_ATTR_PI_DEFAULT_CLONE_TIME_OF_DUPLICATION      = 0x301,/* 0 or 4 */
 };
 /* Partition Information attributes page does not have a get_page structure */
 
@@ -131,6 +162,7 @@ enum {
        OSD_ATTR_CI_PARTITION_ID           = 0x1,       /* 8        */
        OSD_ATTR_CI_COLLECTION_OBJECT_ID   = 0x2,       /* 8        */
        OSD_ATTR_CI_USERNAME               = 0x9,       /* variable */
+       OSD_ATTR_CI_COLLECTION_TYPE        = 0xA,       /* 1        */
        OSD_ATTR_CI_USED_CAPACITY          = 0x81,      /* 8        */
 };
 /* Collection Information attributes page does not have a get_page structure */
@@ -144,6 +176,8 @@ enum {
        OSD_ATTR_OI_USERNAME             = 0x9,       /* variable */
        OSD_ATTR_OI_USED_CAPACITY        = 0x81,      /* 8        */
        OSD_ATTR_OI_LOGICAL_LENGTH       = 0x82,      /* 8        */
+       OSD_ATTR_OI_ACTUAL_DATA_SPACE    = 0xD1,      /* 0 or 8   */
+       OSD_ATTR_OI_RESERVED_DATA_SPACE  = 0xD2,      /* 0 or 8   */
 };
 /* Object Information attributes page does not have a get_page structure */
 
@@ -248,7 +282,18 @@ struct object_timestamps_attributes_page {
        struct osd_timestamp data_modified_time;
 }  __packed;
 
-/* 7.1.2.19 Collections attributes page */
+/* OSD2r05: 7.1.3.19 Attributes Access attributes page
+ * (OSD_APAGE_PARTITION_ATTR_ACCESS)
+ *
+ * Each attribute is of the form below; the total array length is
+ * deduced from the attribute's length.
+ * (See allowed_attributes_access of the struct osd_cap_object_descriptor)
+ */
+struct attributes_access_attr {
+       struct osd_attributes_list_attrid attr_list[0];
+} __packed;
+
+/* OSD2r05: 7.1.2.21 Collections attributes page */
 /* TBD */
 
 /* 7.1.2.20 Root Policy/Security attributes page (OSD_APAGE_ROOT_SECURITY) */
@@ -324,4 +369,29 @@ struct object_security_attributes_page {
        __be32 policy_access_tag;
 }  __packed;
 
+/* OSD2r05: 7.1.3.31 Current Command attributes page
+ * (OSD_APAGE_CURRENT_COMMAND)
+ */
+enum {
+       OSD_ATTR_CC_RESPONSE_INTEGRITY_CHECK_VALUE     = 0x1, /* 32  */
+       OSD_ATTR_CC_OBJECT_TYPE                        = 0x2, /* 1   */
+       OSD_ATTR_CC_PARTITION_ID                       = 0x3, /* 8   */
+       OSD_ATTR_CC_OBJECT_ID                          = 0x4, /* 8   */
+       OSD_ATTR_CC_STARTING_BYTE_ADDRESS_OF_APPEND    = 0x5, /* 8   */
+       OSD_ATTR_CC_CHANGE_IN_USED_CAPACITY            = 0x6, /* 8   */
+};
+
+/*TBD: osdv1_current_command_attributes_page */
+
+struct osdv2_current_command_attributes_page {
+       struct osd_attr_page_header hdr;  /* id=0xFFFFFFFE, size=0x44 */
+       u8 response_integrity_check_value[OSD_CRYPTO_KEYID_SIZE];
+       u8 object_type;
+       u8 reserved[3];
+       __be64 partition_id;
+       __be64 object_id;
+       __be64 starting_byte_address_of_append;
+       __be64 change_in_used_capacity;
+};
+
 #endif /*ndef __OSD_ATTRIBUTES_H__*/
index b24d9616eb469f2afbcdd02f32dbdd9cae6eae3d..02bd9f7163570512e19c0e98e17295a5f47561e8 100644 (file)
@@ -18,6 +18,7 @@
 #include "osd_types.h"
 
 #include <linux/blkdev.h>
+#include <scsi/scsi_device.h>
 
 /* Note: "NI" in comments below means "Not Implemented yet" */
 
@@ -47,6 +48,7 @@ enum osd_std_version {
  */
 struct osd_dev {
        struct scsi_device *scsi_device;
+       struct file *file;
        unsigned def_timeout;
 
 #ifdef OSD_VER1_SUPPORT
@@ -69,6 +71,10 @@ void osd_dev_fini(struct osd_dev *od);
 
 /* some hi level device operations */
 int osd_auto_detect_ver(struct osd_dev *od, void *caps);    /* GFP_KERNEL */
+static inline struct request_queue *osd_request_queue(struct osd_dev *od)
+{
+       return od->scsi_device->request_queue;
+}
 
 /* we might want to use function vector in the future */
 static inline void osd_dev_set_ver(struct osd_dev *od, enum osd_std_version v)
@@ -363,7 +369,9 @@ void osd_req_create_object(struct osd_request *or, struct osd_obj_id *);
 void osd_req_remove_object(struct osd_request *or, struct osd_obj_id *);
 
 void osd_req_write(struct osd_request *or,
-       const struct osd_obj_id *, struct bio *data_out, u64 offset);
+       const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
+int osd_req_write_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
 void osd_req_append(struct osd_request *or,
        const struct osd_obj_id *, struct bio *data_out);/* NI */
 void osd_req_create_write(struct osd_request *or,
@@ -378,7 +386,9 @@ void osd_req_flush_object(struct osd_request *or,
        /*V2*/ u64 offset, /*V2*/ u64 len);
 
 void osd_req_read(struct osd_request *or,
-       const struct osd_obj_id *, struct bio *data_in, u64 offset);
+       const struct osd_obj_id *obj, u64 offset, struct bio *bio, u64 len);
+int osd_req_read_kern(struct osd_request *or,
+       const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
 
 /*
  * Root/Partition/Collection/Object Attributes commands
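
A hedged usage sketch of the kernel-buffer helpers now exported by libosd (previously open-coded in exofs); osd_start_request() and osd_end_request() are existing libosd entry points, the rest is illustrative:

static int example_osd_write(struct osd_dev *od,
			     const struct osd_obj_id *obj,
			     void *buff, u64 len)
{
	struct osd_request *or = osd_start_request(od, GFP_KERNEL);
	int ret;

	if (unlikely(!or))
		return -ENOMEM;

	ret = osd_req_write_kern(or, obj, 0 /* offset */, buff, len);
	if (ret)
		osd_end_request(or);
	return ret;	/* on success the caller finalizes and executes */
}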
index 62b2ab8c69d45318ba93419184b608348928d5e2..2cc8e8b1cc19da61757676325ab3ae9cd9050e2d 100644 (file)
@@ -303,7 +303,15 @@ enum osd_service_actions {
        OSD_ACT_V2(REMOVE_MEMBER_OBJECTS,       0x21)
        OSD_ACT_V2(GET_MEMBER_ATTRIBUTES,       0x22)
        OSD_ACT_V2(SET_MEMBER_ATTRIBUTES,       0x23)
+
+       OSD_ACT_V2(CREATE_CLONE,                0x28)
+       OSD_ACT_V2(CREATE_SNAPSHOT,             0x29)
+       OSD_ACT_V2(DETACH_CLONE,                0x2A)
+       OSD_ACT_V2(REFRESH_SNAPSHOT_CLONE,      0x2B)
+       OSD_ACT_V2(RESTORE_PARTITION_FROM_SNAPSHOT, 0x2C)
+
        OSD_ACT_V2(READ_MAP,                    0x31)
+       OSD_ACT_V2(READ_MAPS_COMPARE,           0x32)
 
        OSD_ACT_V1_V2(PERFORM_SCSI_COMMAND,     0x8F7E, 0x8F7C)
        OSD_ACT_V1_V2(SCSI_TASK_MANAGEMENT,     0x8F7F, 0x8F7D)
index 457588e1119bd4cf3c429ffe6cea6aa8ded3deff..349c7f30720d8f652c79b1db0251f3a44180505a 100644 (file)
@@ -126,12 +126,14 @@ struct iscsi_transport {
                               int *index, int *age);
 
        void (*session_recovery_timedout) (struct iscsi_cls_session *session);
-       struct iscsi_endpoint *(*ep_connect) (struct sockaddr *dst_addr,
+       struct iscsi_endpoint *(*ep_connect) (struct Scsi_Host *shost,
+                                             struct sockaddr *dst_addr,
                                              int non_blocking);
        int (*ep_poll) (struct iscsi_endpoint *ep, int timeout_ms);
        void (*ep_disconnect) (struct iscsi_endpoint *ep);
        int (*tgt_dscvr) (struct Scsi_Host *shost, enum iscsi_tgt_dscvr type,
                          uint32_t enable, struct sockaddr *dst_addr);
+       int (*set_path) (struct Scsi_Host *shost, struct iscsi_path *params);
 };
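
Finally, a hedged sketch of wiring the new hooks into a transport template; the callbacks are the illustrative ones sketched earlier, not any real driver's, and the other mandatory ops are omitted:

static struct iscsi_transport example_iscsi_transport = {
	.owner		= THIS_MODULE,
	.name		= "example",
	.ep_connect	= example_ep_connect,	/* now takes a shost */
	.set_path	= example_set_path,	/* ISCSI_UEVENT_PATH_UPDATE */
};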
 
 /*
@@ -148,6 +150,10 @@ extern void iscsi_conn_error_event(struct iscsi_cls_conn *conn,
 extern int iscsi_recv_pdu(struct iscsi_cls_conn *conn, struct iscsi_hdr *hdr,
                          char *data, uint32_t data_size);
 
+extern int iscsi_offload_mesg(struct Scsi_Host *shost,
+                             struct iscsi_transport *transport, uint32_t type,
+                             char *data, uint16_t data_size);
+
 struct iscsi_cls_conn {
        struct list_head conn_list;     /* item in connlist */
        void *dd_data;                  /* LLD private data */