bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'mad' into for-linus
author Roland Dreier <rolandd@cisco.com>
Fri, 11 Sep 2009 04:19:45 +0000 (21:19 -0700)
committer Roland Dreier <rolandd@cisco.com>
Fri, 11 Sep 2009 04:19:45 +0000 (21:19 -0700)
Conflicts:
drivers/infiniband/core/mad.c

61 files changed:
MAINTAINERS
drivers/infiniband/core/iwcm.c
drivers/infiniband/core/mad.c
drivers/infiniband/core/multicast.c
drivers/infiniband/core/sa_query.c
drivers/infiniband/core/uverbs_main.c
drivers/infiniband/hw/amso1100/c2.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb3/cxio_hal.c
drivers/infiniband/hw/cxgb3/cxio_wr.h
drivers/infiniband/hw/cxgb3/iwch.c
drivers/infiniband/hw/cxgb3/iwch_cm.c
drivers/infiniband/hw/cxgb3/iwch_cm.h
drivers/infiniband/hw/cxgb3/iwch_mem.c
drivers/infiniband/hw/cxgb3/iwch_provider.c
drivers/infiniband/hw/cxgb3/iwch_qp.c
drivers/infiniband/hw/ehca/ehca_main.c
drivers/infiniband/hw/ehca/ehca_reqs.c
drivers/infiniband/hw/ehca/ehca_sqp.c
drivers/infiniband/hw/ipath/ipath_file_ops.c
drivers/infiniband/hw/ipath/ipath_mad.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx4/mlx4_ib.h
drivers/infiniband/hw/mlx4/qp.c
drivers/infiniband/hw/mthca/mthca_catas.c
drivers/infiniband/hw/mthca/mthca_config_reg.h
drivers/infiniband/hw/mthca/mthca_dev.h
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/infiniband/hw/mthca/mthca_main.c
drivers/infiniband/hw/mthca/mthca_provider.c
drivers/infiniband/hw/mthca/mthca_provider.h
drivers/infiniband/hw/mthca/mthca_qp.c
drivers/infiniband/hw/mthca/mthca_reset.c
drivers/infiniband/hw/nes/nes.h
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/nes/nes_cm.h
drivers/infiniband/hw/nes/nes_hw.c
drivers/infiniband/hw/nes/nes_hw.h
drivers/infiniband/hw/nes/nes_utils.c
drivers/infiniband/hw/nes/nes_verbs.c
drivers/infiniband/hw/nes/nes_verbs.h
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/ipoib/ipoib_ib.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/net/cxgb3/cxgb3_main.c
drivers/net/cxgb3/cxgb3_offload.c
drivers/net/cxgb3/cxgb3_offload.h
drivers/net/mlx4/cq.c
drivers/net/mlx4/eq.c
drivers/net/mlx4/icm.c
drivers/net/mlx4/main.c
drivers/net/mlx4/mcg.c
drivers/net/mlx4/mlx4.h
drivers/net/mlx4/mr.c
drivers/net/mlx4/pd.c
drivers/net/mlx4/profile.c
drivers/net/mlx4/qp.c
drivers/net/mlx4/reset.c
drivers/net/mlx4/srq.c
drivers/scsi/cxgb3i/cxgb3i_init.c

index 8dca9d89c6c1d1d04e65e58e881e902b0e450d09..989ff1149390c783823cb5e671e5962574ffad9e 100644 (file)
@@ -439,7 +439,7 @@ F:  drivers/hwmon/ams/
 AMSO1100 RNIC DRIVER
 M:     Tom Tucker <tom@opengridcomputing.com>
 M:     Steve Wise <swise@opengridcomputing.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 S:     Maintained
 F:     drivers/infiniband/hw/amso1100/
 
@@ -1494,7 +1494,7 @@ F:        drivers/net/cxgb3/
 
 CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
 M:     Steve Wise <swise@chelsio.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 W:     http://www.openfabrics.org
 S:     Supported
 F:     drivers/infiniband/hw/cxgb3/
@@ -1868,7 +1868,7 @@ F:        fs/efs/
 EHCA (IBM GX bus InfiniBand adapter) DRIVER
 M:     Hoang-Nam Nguyen <hnguyen@de.ibm.com>
 M:     Christoph Raisch <raisch@de.ibm.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 S:     Supported
 F:     drivers/infiniband/hw/ehca/
 
@@ -2552,7 +2552,7 @@ INFINIBAND SUBSYSTEM
 M:     Roland Dreier <rolandd@cisco.com>
 M:     Sean Hefty <sean.hefty@intel.com>
 M:     Hal Rosenstock <hal.rosenstock@gmail.com>
-L:     general@lists.openfabrics.org (moderated for non-subscribers)
+L:     linux-rdma@vger.kernel.org
 W:     http://www.openib.org/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
 S:     Supported
@@ -2729,7 +2729,7 @@ F:        drivers/net/ipg.c
 
 IPATH DRIVER
 M:     Ralph Campbell <infinipath@qlogic.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 T:     git git://git.qlogic.com/ipath-linux-2.6
 S:     Supported
 F:     drivers/infiniband/hw/ipath/
@@ -3485,7 +3485,7 @@ F:        drivers/scsi/NCR_D700.*
 NETEFFECT IWARP RNIC DRIVER (IW_NES)
 M:     Faisal Latif <faisal.latif@intel.com>
 M:     Chien Tung <chien.tin.tung@intel.com>
-L:     general@lists.openfabrics.org
+L:     linux-rdma@vger.kernel.org
 W:     http://www.neteffect.com
 S:     Supported
 F:     drivers/infiniband/hw/nes/
index 8f9509e1ebf76494217b7d48f6b7bdef307ac3b4..55d093a36ae48a29a09877176b1448e279555d00 100644 (file)
@@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)
                 * In either case, must tell the provider to reject.
                 */
                cm_id_priv->state = IW_CM_STATE_DESTROYING;
+               cm_id->device->iwcm->reject(cm_id, NULL, 0);
                break;
        case IW_CM_STATE_CONN_SENT:
        case IW_CM_STATE_DESTROYING:
index 0f7da241fa05d28d7a367b946b4b12a8a682ad99..7522008fda86880f759128e694d9df0cda68c764 100644 (file)
@@ -60,8 +60,7 @@ static struct list_head ib_mad_port_list;
 static u32 ib_mad_client_id = 0;
 
 /* Port list lock */
-static spinlock_t ib_mad_port_list_lock;
-
+static DEFINE_SPINLOCK(ib_mad_port_list_lock);
 
 /* Forward declarations */
 static int method_in_use(struct ib_mad_mgmt_method_table **method,
@@ -2999,8 +2998,6 @@ static int __init ib_mad_init_module(void)
        mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE);
        mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);
 
-       spin_lock_init(&ib_mad_port_list_lock);
-
        ib_mad_cache = kmem_cache_create("ib_mad",
                                         sizeof(struct ib_mad_private),
                                         0,
@@ -3036,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)
 
 module_init(ib_mad_init_module);
 module_exit(ib_mad_cleanup_module);
-
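This hunk and the spinlock hunks in sa_query.c and uverbs_main.c below share one pattern: a file-scope lock that was initialized at runtime in module_init() becomes statically initialized, so the init code shrinks and the lock is valid from load time. A minimal sketch of the pattern, with hypothetical names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    /* Usable from the moment the module is loaded; no
     * spin_lock_init() call needed in module_init(). */
    static DEFINE_SPINLOCK(example_lock);
    static LIST_HEAD(example_list);

    static void example_add(struct list_head *node)
    {
            spin_lock(&example_lock);
            list_add_tail(node, &example_list);
            spin_unlock(&example_lock);
    }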
index 107f170c57cdb12cc05786551b21d29b01ecc6de..8d82ba17135366317ba2d3e856c78c4cfb97829a 100644 (file)
@@ -106,6 +106,8 @@ struct mcast_group {
        struct ib_sa_query      *query;
        int                     query_id;
        u16                     pkey_index;
+       u8                      leave_state;
+       int                     retries;
 };
 
 struct mcast_member {
@@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)
 
        rec = group->rec;
        rec.join_state = leave_state;
+       group->leave_state = leave_state;
 
        ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,
                                       port->port_num, IB_SA_METHOD_DELETE, &rec,
@@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,
 {
        struct mcast_group *group = context;
 
-       mcast_work_handler(&group->work);
+       if (status && group->retries > 0 &&
+           !send_leave(group, group->leave_state))
+               group->retries--;
+       else
+               mcast_work_handler(&group->work);
 }
 
 static struct mcast_group *acquire_group(struct mcast_port *port,
@@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,
        if (!group)
                return NULL;
 
+       group->retries = 3;
        group->port = port;
        group->rec.mgid = *mgid;
        group->pkey_index = MCAST_INVALID_PKEY_INDEX;
index 1865049e80f7548be1c814e6a9bfc73229f5195a..82543716d59ef74ce57029e03fd1e09aef6032a3 100644 (file)
@@ -109,10 +109,10 @@ static struct ib_client sa_client = {
        .remove = ib_sa_remove_one
 };
 
-static spinlock_t idr_lock;
+static DEFINE_SPINLOCK(idr_lock);
 static DEFINE_IDR(query_idr);
 
-static spinlock_t tid_lock;
+static DEFINE_SPINLOCK(tid_lock);
 static u32 tid;
 
 #define PATH_REC_FIELD(field) \
@@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)
 {
        int ret;
 
-       spin_lock_init(&idr_lock);
-       spin_lock_init(&tid_lock);
-
        get_random_bytes(&tid, sizeof tid);
 
        ret = ib_register_client(&sa_client);
index eb36a81dd09bff2d544675de4c37c447614d3262..d3fff9e008a3e01f1f81795101738ae236b02dcb 100644 (file)
@@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);
 DEFINE_IDR(ib_uverbs_qp_idr);
 DEFINE_IDR(ib_uverbs_srq_idr);
 
-static spinlock_t map_lock;
+static DEFINE_SPINLOCK(map_lock);
 static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];
 static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES);
 
@@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,
 
        if (hdr.command < 0                             ||
            hdr.command >= ARRAY_SIZE(uverbs_cmd_table) ||
-           !uverbs_cmd_table[hdr.command]              ||
-           !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+           !uverbs_cmd_table[hdr.command])
                return -EINVAL;
 
        if (!file->ucontext &&
            hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)
                return -EINVAL;
 
+       if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command)))
+               return -ENOSYS;
+
        return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,
                                             hdr.in_words * 4, hdr.out_words * 4);
 }
@@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)
 {
        int ret;
 
-       spin_lock_init(&map_lock);
-
        ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,
                                     "infiniband_verbs");
        if (ret) {
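The reordered checks in ib_uverbs_write() now distinguish two failures: a command number that is out of range or has no handler stays -EINVAL, while a well-formed command that this particular device does not advertise in uverbs_cmd_mask becomes -ENOSYS. A hedged sketch of the same split, with hypothetical names and a trimmed signature:

    #include <linux/kernel.h>
    #include <linux/types.h>

    struct ctx { u64 cmd_mask; };
    typedef ssize_t (*cmd_fn)(struct ctx *ctx);
    static cmd_fn cmd_table[8];

    static ssize_t dispatch(struct ctx *ctx, u32 cmd)
    {
            if (cmd >= ARRAY_SIZE(cmd_table) || !cmd_table[cmd])
                    return -EINVAL;         /* request itself is malformed */

            if (!(ctx->cmd_mask & (1ull << cmd)))
                    return -ENOSYS;         /* valid command, unsupported here */

            return cmd_table[cmd](ctx);
    }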
index 0cfbb6d2f762b5699c4edef953ab5757196dae5c..8250740c94b09bce8f5c73c6928d895cd24a28c3 100644 (file)
@@ -86,11 +86,7 @@ MODULE_DEVICE_TABLE(pci, c2_pci_table);
 
 static void c2_print_macaddr(struct net_device *netdev)
 {
-       pr_debug("%s: MAC %02X:%02X:%02X:%02X:%02X:%02X, "
-               "IRQ %u\n", netdev->name,
-               netdev->dev_addr[0], netdev->dev_addr[1], netdev->dev_addr[2],
-               netdev->dev_addr[3], netdev->dev_addr[4], netdev->dev_addr[5],
-               netdev->irq);
+       pr_debug("%s: MAC %pM, IRQ %u\n", netdev->name, netdev->dev_addr, netdev->irq);
 }
 
 static void c2_set_rxbufsize(struct c2_port *c2_port)
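The %pM printf extension formats a 6-byte MAC address with colon separators, replacing six %02X conversions and their argument list. A tiny sketch:

    #include <linux/if_ether.h>
    #include <linux/kernel.h>

    static void show_mac(void)
    {
            u8 mac[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

            pr_info("MAC %pM\n", mac);  /* prints "MAC 00:11:22:33:44:55" */
    }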
index f1948fad85d7cd1586c009bd7e89d53f2c30375b..ad723bd8bf498090560c72bc78a1a06dc72e9730 100644 (file)
@@ -780,11 +780,11 @@ int c2_register_device(struct c2_dev *dev)
        /* Register pseudo network device */
        dev->pseudo_netdev = c2_pseudo_netdev_init(dev);
        if (!dev->pseudo_netdev)
-               goto out3;
+               goto out;
 
        ret = register_netdev(dev->pseudo_netdev);
        if (ret)
-               goto out2;
+               goto out_free_netdev;
 
        pr_debug("%s:%u\n", __func__, __LINE__);
        strlcpy(dev->ibdev.name, "amso%d", IB_DEVICE_NAME_MAX);
@@ -851,6 +851,10 @@ int c2_register_device(struct c2_dev *dev)
        dev->ibdev.post_recv = c2_post_receive;
 
        dev->ibdev.iwcm = kmalloc(sizeof(*dev->ibdev.iwcm), GFP_KERNEL);
+       if (dev->ibdev.iwcm == NULL) {
+               ret = -ENOMEM;
+               goto out_unregister_netdev;
+       }
        dev->ibdev.iwcm->add_ref = c2_add_ref;
        dev->ibdev.iwcm->rem_ref = c2_rem_ref;
        dev->ibdev.iwcm->get_qp = c2_get_qp;
@@ -862,23 +866,25 @@ int c2_register_device(struct c2_dev *dev)
 
        ret = ib_register_device(&dev->ibdev);
        if (ret)
-               goto out1;
+               goto out_free_iwcm;
 
        for (i = 0; i < ARRAY_SIZE(c2_dev_attributes); ++i) {
                ret = device_create_file(&dev->ibdev.dev,
                                               c2_dev_attributes[i]);
                if (ret)
-                       goto out0;
+                       goto out_unregister_ibdev;
        }
-       goto out3;
+       goto out;
 
-out0:
+out_unregister_ibdev:
        ib_unregister_device(&dev->ibdev);
-out1:
+out_free_iwcm:
+       kfree(dev->ibdev.iwcm);
+out_unregister_netdev:
        unregister_netdev(dev->pseudo_netdev);
-out2:
+out_free_netdev:
        free_netdev(dev->pseudo_netdev);
-out3:
+out:
        pr_debug("%s:%u ret=%d\n", __func__, __LINE__, ret);
        return ret;
 }
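Besides adding the missing kmalloc() failure check for dev->ibdev.iwcm, this hunk renames the numbered unwind labels (out0..out3) to names that say what they undo, so a new failure point can be spliced in without renumbering everything. A minimal sketch of the idiom, with hypothetical names:

    #include <linux/netdevice.h>
    #include <linux/slab.h>

    struct example_dev {
            struct net_device *netdev;
            void *priv;
    };

    static int example_create_files(struct example_dev *dev)
    {
            return 0;                       /* stand-in for sysfs setup */
    }

    static int example_register(struct example_dev *dev)
    {
            int ret;

            dev->priv = kzalloc(64, GFP_KERNEL);
            if (!dev->priv)
                    return -ENOMEM;

            ret = register_netdev(dev->netdev);
            if (ret)
                    goto out_free_priv;

            ret = example_create_files(dev);
            if (ret)
                    goto out_unregister_netdev;

            return 0;

    out_unregister_netdev:
            unregister_netdev(dev->netdev);
    out_free_priv:
            kfree(dev->priv);
            return ret;
    }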
index 62f9cf2f94ec647756dcd5f2ae06c9f01a9eafac..72ed3396b721e36d528f3d6191034493447c33ae 100644 (file)
@@ -852,7 +852,9 @@ int cxio_rdma_init(struct cxio_rdev *rdev_p, struct t3_rdma_init_attr *attr)
        wqe->qpcaps = attr->qpcaps;
        wqe->ulpdu_size = cpu_to_be16(attr->tcp_emss);
        wqe->rqe_count = cpu_to_be16(attr->rqe_count);
-       wqe->flags_rtr_type = cpu_to_be16(attr->flags|V_RTR_TYPE(attr->rtr_type));
+       wqe->flags_rtr_type = cpu_to_be16(attr->flags |
+                                         V_RTR_TYPE(attr->rtr_type) |
+                                         V_CHAN(attr->chan));
        wqe->ord = cpu_to_be32(attr->ord);
        wqe->ird = cpu_to_be32(attr->ird);
        wqe->qp_dma_addr = cpu_to_be64(attr->qp_dma_addr);
@@ -1032,6 +1034,7 @@ err3:
 err2:
        cxio_hal_destroy_ctrl_qp(rdev_p);
 err1:
+       rdev_p->t3cdev_p->ulp = NULL;
        list_del(&rdev_p->entry);
        return err;
 }
index 32e3b1461d81d551f3b1aa2a602c9449d2597c8c..a197a5b7ac7fc74836aaf2ece28ff93c4271428d 100644 (file)
@@ -327,6 +327,11 @@ enum rdma_init_rtr_types {
 #define V_RTR_TYPE(x)  ((x) << S_RTR_TYPE)
 #define G_RTR_TYPE(x)  ((((x) >> S_RTR_TYPE)) & M_RTR_TYPE)
 
+#define S_CHAN         4
+#define M_CHAN         0x3
+#define V_CHAN(x)      ((x) << S_CHAN)
+#define G_CHAN(x)      ((((x) >> S_CHAN)) & M_CHAN)
+
 struct t3_rdma_init_attr {
        u32 tid;
        u32 qpid;
@@ -346,6 +351,7 @@ struct t3_rdma_init_attr {
        u16 flags;
        u16 rqe_count;
        u32 irs;
+       u32 chan;
 };
 
 struct t3_rdma_init_wr {
index 26fc0a4eaa749f91477a711be17710e98dc36ff1..b0ea0105ddf6c2caa01dda9716d7f8f38e21642b 100644 (file)
@@ -51,7 +51,7 @@ cxgb3_cpl_handler_func t3c_handlers[NUM_CPL_CMDS];
 
 static void open_rnic_dev(struct t3cdev *);
 static void close_rnic_dev(struct t3cdev *);
-static void iwch_err_handler(struct t3cdev *, u32, u32);
+static void iwch_event_handler(struct t3cdev *, u32, u32);
 
 struct cxgb3_client t3c_client = {
        .name = "iw_cxgb3",
@@ -59,7 +59,7 @@ struct cxgb3_client t3c_client = {
        .remove = close_rnic_dev,
        .handlers = t3c_handlers,
        .redirect = iwch_ep_redirect,
-       .err_handler = iwch_err_handler
+       .event_handler = iwch_event_handler
 };
 
 static LIST_HEAD(dev_list);
@@ -105,11 +105,9 @@ static void rnic_init(struct iwch_dev *rnicp)
 static void open_rnic_dev(struct t3cdev *tdev)
 {
        struct iwch_dev *rnicp;
-       static int vers_printed;
 
        PDBG("%s t3cdev %p\n", __func__,  tdev);
-       if (!vers_printed++)
-               printk(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
+       printk_once(KERN_INFO MOD "Chelsio T3 RDMA Driver - version %s\n",
                       DRV_VERSION);
        rnicp = (struct iwch_dev *)ib_alloc_device(sizeof(*rnicp));
        if (!rnicp) {
@@ -162,21 +160,36 @@ static void close_rnic_dev(struct t3cdev *tdev)
        mutex_unlock(&dev_mutex);
 }
 
-static void iwch_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void iwch_event_handler(struct t3cdev *tdev, u32 evt, u32 port_id)
 {
        struct cxio_rdev *rdev = tdev->ulp;
-       struct iwch_dev *rnicp = rdev_to_iwch_dev(rdev);
+       struct iwch_dev *rnicp;
        struct ib_event event;
+       u32    portnum = port_id + 1;
 
-       if (status == OFFLOAD_STATUS_DOWN) {
+       if (!rdev)
+               return;
+       rnicp = rdev_to_iwch_dev(rdev);
+       switch (evt) {
+       case OFFLOAD_STATUS_DOWN: {
                rdev->flags = CXIO_ERROR_FATAL;
-
-               event.device = &rnicp->ibdev;
                event.event  = IB_EVENT_DEVICE_FATAL;
-               event.element.port_num = 0;
-               ib_dispatch_event(&event);
+               break;
+               }
+       case OFFLOAD_PORT_DOWN: {
+               event.event  = IB_EVENT_PORT_ERR;
+               break;
+               }
+       case OFFLOAD_PORT_UP: {
+               event.event  = IB_EVENT_PORT_ACTIVE;
+               break;
+               }
        }
 
+       event.device = &rnicp->ibdev;
+       event.element.port_num = portnum;
+       ib_dispatch_event(&event);
+
        return;
 }
 
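The rename from iwch_err_handler to iwch_event_handler matches its wider job: fatal errors, port down, and port up are each translated to an IB event, with a single dispatch after the switch. A hedged sketch of the dispatch half:

    #include <rdma/ib_verbs.h>

    static void report_port_event(struct ib_device *ibdev, u8 port,
                                  enum ib_event_type type)
    {
            struct ib_event ev = {
                    .device           = ibdev,
                    .event            = type,
                    .element.port_num = port,
            };

            ib_dispatch_event(&ev);         /* fans out to registered handlers */
    }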
index 52d7bb0c2a126cbf8a867cb522e9062c39015a5b..66b41351910ad390d5ec70dbb18c5ab99a196f45 100644 (file)
@@ -286,7 +286,7 @@ void __free_ep(struct kref *kref)
        ep = container_of(container_of(kref, struct iwch_ep_common, kref),
                          struct iwch_ep, com);
        PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
-       if (ep->com.flags & RELEASE_RESOURCES) {
+       if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
                cxgb3_remove_tid(ep->com.tdev, (void *)ep, ep->hwtid);
                dst_release(ep->dst);
                l2t_release(L2DATA(ep->com.tdev), ep->l2t);
@@ -297,7 +297,7 @@ void __free_ep(struct kref *kref)
 static void release_ep_resources(struct iwch_ep *ep)
 {
        PDBG("%s ep %p tid %d\n", __func__, ep, ep->hwtid);
-       ep->com.flags |= RELEASE_RESOURCES;
+       set_bit(RELEASE_RESOURCES, &ep->com.flags);
        put_ep(&ep->com);
 }
 
@@ -786,10 +786,12 @@ static void connect_request_upcall(struct iwch_ep *ep)
        event.private_data_len = ep->plen;
        event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        event.provider_data = ep;
-       if (state_read(&ep->parent_ep->com) != DEAD)
+       if (state_read(&ep->parent_ep->com) != DEAD) {
+               get_ep(&ep->com);
                ep->parent_ep->com.cm_id->event_handler(
                                                ep->parent_ep->com.cm_id,
                                                &event);
+       }
        put_ep(&ep->parent_ep->com);
        ep->parent_ep = NULL;
 }
@@ -1156,8 +1158,7 @@ static int abort_rpl(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         * We get 2 abort replies from the HW.  The first one must
         * be ignored except for scribbling that we need one more.
         */
-       if (!(ep->com.flags & ABORT_REQ_IN_PROGRESS)) {
-               ep->com.flags |= ABORT_REQ_IN_PROGRESS;
+       if (!test_and_set_bit(ABORT_REQ_IN_PROGRESS, &ep->com.flags)) {
                return CPL_RET_BUF_DONE;
        }
 
@@ -1477,10 +1478,14 @@ static int peer_close(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
-                * rejects the CR.
+                * rejects the CR. Also wake up anyone waiting
+                * in rdma connection migration (see iwch_accept_cr()).
                 */
                __state_set(&ep->com, CLOSING);
-               get_ep(&ep->com);
+               ep->com.rpl_done = 1;
+               ep->com.rpl_err = -ECONNRESET;
+               PDBG("waking up ep %p\n", ep);
+               wake_up(&ep->com.waitq);
                break;
        case MPA_REP_SENT:
                __state_set(&ep->com, CLOSING);
@@ -1561,8 +1566,7 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
         * We get 2 peer aborts from the HW.  The first one must
         * be ignored except for scribbling that we need one more.
         */
-       if (!(ep->com.flags & PEER_ABORT_IN_PROGRESS)) {
-               ep->com.flags |= PEER_ABORT_IN_PROGRESS;
+       if (!test_and_set_bit(PEER_ABORT_IN_PROGRESS, &ep->com.flags)) {
                return CPL_RET_BUF_DONE;
        }
 
@@ -1589,9 +1593,13 @@ static int peer_abort(struct t3cdev *tdev, struct sk_buff *skb, void *ctx)
                /*
                 * We're gonna mark this puppy DEAD, but keep
                 * the reference on it until the ULP accepts or
-                * rejects the CR.
+                * rejects the CR. Also wake up anyone waiting
+                * in rdma connection migration (see iwch_accept_cr()).
                 */
-               get_ep(&ep->com);
+               ep->com.rpl_done = 1;
+               ep->com.rpl_err = -ECONNRESET;
+               PDBG("waking up ep %p\n", ep);
+               wake_up(&ep->com.waitq);
                break;
        case MORIBUND:
        case CLOSING:
@@ -1797,6 +1805,7 @@ int iwch_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
                err = send_mpa_reject(ep, pdata, pdata_len);
                err = iwch_ep_disconnect(ep, 0, GFP_KERNEL);
        }
+       put_ep(&ep->com);
        return 0;
 }
 
@@ -1810,8 +1819,10 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        struct iwch_qp *qp = get_qhp(h, conn_param->qpn);
 
        PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
-       if (state_read(&ep->com) == DEAD)
-               return -ECONNRESET;
+       if (state_read(&ep->com) == DEAD) {
+               err = -ECONNRESET;
+               goto err;
+       }
 
        BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
        BUG_ON(!qp);
@@ -1819,15 +1830,14 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        if ((conn_param->ord > qp->rhp->attr.max_rdma_read_qp_depth) ||
            (conn_param->ird > qp->rhp->attr.max_rdma_reads_per_qp)) {
                abort_connection(ep, NULL, GFP_KERNEL);
-               return -EINVAL;
+               err = -EINVAL;
+               goto err;
        }
 
        cm_id->add_ref(cm_id);
        ep->com.cm_id = cm_id;
        ep->com.qp = qp;
 
-       ep->com.rpl_done = 0;
-       ep->com.rpl_err = 0;
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;
 
@@ -1836,8 +1846,6 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
-       get_ep(&ep->com);
-
        /* bind QP to EP and move to RTS */
        attrs.mpa_attr = ep->mpa_attr;
        attrs.max_ird = ep->ird;
@@ -1855,30 +1863,31 @@ int iwch_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        err = iwch_modify_qp(ep->com.qp->rhp,
                             ep->com.qp, mask, &attrs, 1);
        if (err)
-               goto err;
+               goto err1;
 
        /* if needed, wait for wr_ack */
        if (iwch_rqes_posted(qp)) {
                wait_event(ep->com.waitq, ep->com.rpl_done);
                err = ep->com.rpl_err;
                if (err)
-                       goto err;
+                       goto err1;
        }
 
        err = send_mpa_reply(ep, conn_param->private_data,
                             conn_param->private_data_len);
        if (err)
-               goto err;
+               goto err1;
 
 
        state_set(&ep->com, FPDU_MODE);
        established_upcall(ep);
        put_ep(&ep->com);
        return 0;
-err:
+err1:
        ep->com.cm_id = NULL;
        ep->com.qp = NULL;
        cm_id->rem_ref(cm_id);
+err:
        put_ep(&ep->com);
        return err;
 }
@@ -2097,14 +2106,17 @@ int iwch_ep_disconnect(struct iwch_ep *ep, int abrupt, gfp_t gfp)
                        ep->com.state = CLOSING;
                        start_ep_timer(ep);
                }
+               set_bit(CLOSE_SENT, &ep->com.flags);
                break;
        case CLOSING:
-               close = 1;
-               if (abrupt) {
-                       stop_ep_timer(ep);
-                       ep->com.state = ABORTING;
-               } else
-                       ep->com.state = MORIBUND;
+               if (!test_and_set_bit(CLOSE_SENT, &ep->com.flags)) {
+                       close = 1;
+                       if (abrupt) {
+                               stop_ep_timer(ep);
+                               ep->com.state = ABORTING;
+                       } else
+                               ep->com.state = MORIBUND;
+               }
                break;
        case MORIBUND:
        case ABORTING:
index 43c0aea7eadc8b0b52ac1b8b3df8965c5a20a059..b9efadfffb4f64b39e991230b25e676c9a23c777 100644 (file)
@@ -145,9 +145,10 @@ enum iwch_ep_state {
 };
 
 enum iwch_ep_flags {
-       PEER_ABORT_IN_PROGRESS  = (1 << 0),
-       ABORT_REQ_IN_PROGRESS   = (1 << 1),
-       RELEASE_RESOURCES       = (1 << 2),
+       PEER_ABORT_IN_PROGRESS  = 0,
+       ABORT_REQ_IN_PROGRESS   = 1,
+       RELEASE_RESOURCES       = 2,
+       CLOSE_SENT              = 3,
 };
 
 struct iwch_ep_common {
@@ -162,7 +163,7 @@ struct iwch_ep_common {
        wait_queue_head_t waitq;
        int rpl_done;
        int rpl_err;
-       u32 flags;
+       unsigned long flags;
 };
 
 struct iwch_listen_ep {
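Together with the iwch_cm.c hunks above, this turns ep->com.flags from a u32 of ORed masks (updated non-atomically) into an unsigned long of bit numbers driven by atomic bitops, so checks like "only the first abort reply proceeds" need no extra locking. A minimal sketch with hypothetical names:

    #include <linux/bitops.h>
    #include <linux/types.h>

    enum conn_flags {                       /* bit numbers, not masks */
            CONN_ABORTING = 0,
            CONN_CLOSED   = 1,
    };

    struct conn {
            unsigned long flags;            /* bitops require unsigned long */
    };

    /* Returns true for exactly one caller, however many CPUs race here. */
    static bool conn_mark_closed(struct conn *c)
    {
            return !test_and_set_bit(CONN_CLOSED, &c->flags);
    }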
index ec49a5cbdebbc19705968069b58d2ab9f98d7d8e..e1ec65ebb016e4c7cfbfe81ca1e2095ba614ce08 100644 (file)
@@ -39,7 +39,7 @@
 #include "iwch.h"
 #include "iwch_provider.h"
 
-static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
+static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
 {
        u32 mmid;
 
@@ -47,14 +47,15 @@ static void iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
        mhp->attr.stag = stag;
        mmid = stag >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-       insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
        PDBG("%s mmid 0x%x mhp %p\n", __func__, mmid, mhp);
+       return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
 }
 
 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                      struct iwch_mr *mhp, int shift)
 {
        u32 stag;
+       int ret;
 
        if (cxio_register_phys_mem(&rhp->rdev,
                                   &stag, mhp->attr.pdid,
@@ -66,9 +67,11 @@ int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                return -ENOMEM;
 
-       iwch_finish_mem_reg(mhp, stag);
-
-       return 0;
+       ret = iwch_finish_mem_reg(mhp, stag);
+       if (ret)
+               cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+                      mhp->attr.pbl_addr);
+       return ret;
 }
 
 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
@@ -77,6 +80,7 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                        int npages)
 {
        u32 stag;
+       int ret;
 
        /* We could support this... */
        if (npages > mhp->attr.pbl_size)
@@ -93,9 +97,12 @@ int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
                                   mhp->attr.pbl_size, mhp->attr.pbl_addr))
                return -ENOMEM;
 
-       iwch_finish_mem_reg(mhp, stag);
+       ret = iwch_finish_mem_reg(mhp, stag);
+       if (ret)
+               cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
+                      mhp->attr.pbl_addr);
 
-       return 0;
+       return ret;
 }
 
 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
index e2a63214008a90b1ddf137341bbbbc4eac2d16e4..6895523779d0d5ebb9d5eeb4f58e9d5ca42a1c0c 100644 (file)
@@ -195,7 +195,11 @@ static struct ib_cq *iwch_create_cq(struct ib_device *ibdev, int entries, int ve
        spin_lock_init(&chp->lock);
        atomic_set(&chp->refcnt, 1);
        init_waitqueue_head(&chp->wait);
-       insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid);
+       if (insert_handle(rhp, &rhp->cqidr, chp, chp->cq.cqid)) {
+               cxio_destroy_cq(&chp->rhp->rdev, &chp->cq);
+               kfree(chp);
+               return ERR_PTR(-ENOMEM);
+       }
 
        if (ucontext) {
                struct iwch_mm_entry *mm;
@@ -750,7 +754,11 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
        mhp->attr.stag = stag;
        mmid = (stag) >> 8;
        mhp->ibmw.rkey = stag;
-       insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+       if (insert_handle(rhp, &rhp->mmidr, mhp, mmid)) {
+               cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
+               kfree(mhp);
+               return ERR_PTR(-ENOMEM);
+       }
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmw);
 }
@@ -778,37 +786,43 @@ static struct ib_mr *iwch_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth)
        struct iwch_mr *mhp;
        u32 mmid;
        u32 stag = 0;
-       int ret;
+       int ret = 0;
 
        php = to_iwch_pd(pd);
        rhp = php->rhp;
        mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
        if (!mhp)
-               return ERR_PTR(-ENOMEM);
+               goto err;
 
        mhp->rhp = rhp;
        ret = iwch_alloc_pbl(mhp, pbl_depth);
-       if (ret) {
-               kfree(mhp);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto err1;
        mhp->attr.pbl_size = pbl_depth;
        ret = cxio_allocate_stag(&rhp->rdev, &stag, php->pdid,
                                 mhp->attr.pbl_size, mhp->attr.pbl_addr);
-       if (ret) {
-               iwch_free_pbl(mhp);
-               kfree(mhp);
-               return ERR_PTR(ret);
-       }
+       if (ret)
+               goto err2;
        mhp->attr.pdid = php->pdid;
        mhp->attr.type = TPT_NON_SHARED_MR;
        mhp->attr.stag = stag;
        mhp->attr.state = 1;
        mmid = (stag) >> 8;
        mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
-       insert_handle(rhp, &rhp->mmidr, mhp, mmid);
+       if (insert_handle(rhp, &rhp->mmidr, mhp, mmid))
+               goto err3;
+
        PDBG("%s mmid 0x%x mhp %p stag 0x%x\n", __func__, mmid, mhp, stag);
        return &(mhp->ibmr);
+err3:
+       cxio_dereg_mem(&rhp->rdev, stag, mhp->attr.pbl_size,
+                      mhp->attr.pbl_addr);
+err2:
+       iwch_free_pbl(mhp);
+err1:
+       kfree(mhp);
+err:
+       return ERR_PTR(ret);
 }
 
 static struct ib_fast_reg_page_list *iwch_alloc_fastreg_pbl(
@@ -961,7 +975,13 @@ static struct ib_qp *iwch_create_qp(struct ib_pd *pd,
        spin_lock_init(&qhp->lock);
        init_waitqueue_head(&qhp->wait);
        atomic_set(&qhp->refcnt, 1);
-       insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid);
+
+       if (insert_handle(rhp, &rhp->qpidr, qhp, qhp->wq.qpid)) {
+               cxio_destroy_qp(&rhp->rdev, &qhp->wq,
+                       ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
+               kfree(qhp);
+               return ERR_PTR(-ENOMEM);
+       }
 
        if (udata) {
 
@@ -1418,6 +1438,7 @@ int iwch_register_device(struct iwch_dev *dev)
 bail2:
        ib_unregister_device(&dev->ibdev);
 bail1:
+       kfree(dev->ibdev.iwcm);
        return ret;
 }
 
@@ -1430,5 +1451,6 @@ void iwch_unregister_device(struct iwch_dev *dev)
                device_remove_file(&dev->ibdev.dev,
                                   iwch_class_attributes[i]);
        ib_unregister_device(&dev->ibdev);
+       kfree(dev->ibdev.iwcm);
        return;
 }
index 27bbdc8e773ae934e03b8d421a08102c7b945d63..6e86534719414ff5e5e6c859dd4fd3cbf995cfa5 100644 (file)
@@ -889,6 +889,7 @@ static int rdma_init(struct iwch_dev *rhp, struct iwch_qp *qhp,
        init_attr.qp_dma_size = (1UL << qhp->wq.size_log2);
        init_attr.rqe_count = iwch_rqes_posted(qhp);
        init_attr.flags = qhp->attr.mpa_attr.initiator ? MPA_INITIATOR : 0;
+       init_attr.chan = qhp->ep->l2t->smt_idx;
        if (peer2peer) {
                init_attr.rtr_type = RTR_READ;
                if (init_attr.ord == 0 && qhp->attr.mpa_attr.initiator)
index fab18a2c74a8d55f00e5d2772ef1b448e01d6e59..5b635aa5947e27822a5386cdc447f8b4a2693db3 100644 (file)
@@ -52,7 +52,7 @@
 #include "ehca_tools.h"
 #include "hcp_if.h"
 
-#define HCAD_VERSION "0028"
+#define HCAD_VERSION "0029"
 
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_AUTHOR("Christoph Raisch <raisch@de.ibm.com>");
@@ -64,7 +64,7 @@ static int ehca_hw_level      = 0;
 static int ehca_poll_all_eqs  = 1;
 
 int ehca_debug_level   = 0;
-int ehca_nr_ports      = 2;
+int ehca_nr_ports      = -1;
 int ehca_use_hp_mr     = 0;
 int ehca_port_act_time = 30;
 int ehca_static_rate   = -1;
@@ -95,8 +95,8 @@ MODULE_PARM_DESC(hw_level,
                 "Hardware level (0: autosensing (default), "
                 "0x10..0x14: eHCA, 0x20..0x23: eHCA2)");
 MODULE_PARM_DESC(nr_ports,
-                "number of connected ports (-1: autodetect, 1: port one only, "
-                "2: two ports (default)");
+                "number of connected ports (-1: autodetect (default), "
+                "1: port one only, 2: two ports)");
 MODULE_PARM_DESC(use_hp_mr,
                 "Use high performance MRs (default: no)");
 MODULE_PARM_DESC(port_act_time,
index 5a3d96f84c79c780b078b03b363bb847ef41006e..8fd88cd828fd657ab11ef33e551e0f1060669f80 100644 (file)
@@ -786,7 +786,11 @@ repoll:
        wc->slid = cqe->rlid;
        wc->dlid_path_bits = cqe->dlid;
        wc->src_qp = cqe->remote_qp_number;
-       wc->wc_flags = cqe->w_completion_flags;
+       /*
+        * HW has "Immed data present" and "GRH present" in bits 6 and 5.
+        * SW defines those in bits 1 and 0, so we can just shift and mask.
+        */
+       wc->wc_flags = (cqe->w_completion_flags >> 5) & 3;
        wc->ex.imm_data = cpu_to_be32(cqe->immediate_data);
        wc->sl = cqe->service_level;
 
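As a worked example (assuming the usual ib_verbs.h values IB_WC_GRH = 1<<0 and IB_WC_WITH_IMM = 1<<1): a CQE with both hardware bits set carries w_completion_flags = 0x60, and (0x60 >> 5) & 3 = 3 = IB_WC_WITH_IMM | IB_WC_GRH, whereas the old unshifted assignment would have reported 0x60, i.e. neither flag.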
index c568b28f4e207416762c18c69d86f27818e9d530..8c1213f8916a19bfdccab6f3dcaf08f14065c509 100644 (file)
@@ -125,14 +125,30 @@ struct ib_perf {
        u8 data[192];
 } __attribute__ ((packed));
 
+/* TC/SL/FL packed into 32 bits, as in ClassPortInfo */
+struct tcslfl {
+       u32 tc:8;
+       u32 sl:4;
+       u32 fl:20;
+} __attribute__ ((packed));
+
+/* IP Version/TC/FL packed into 32 bits, as in GRH */
+struct vertcfl {
+       u32 ver:4;
+       u32 tc:8;
+       u32 fl:20;
+} __attribute__ ((packed));
 
 static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
+                            struct ib_wc *in_wc, struct ib_grh *in_grh,
                             struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
        struct ib_perf *in_perf = (struct ib_perf *)in_mad;
        struct ib_perf *out_perf = (struct ib_perf *)out_mad;
        struct ib_class_port_info *poi =
                (struct ib_class_port_info *)out_perf->data;
+       struct tcslfl *tcslfl =
+               (struct tcslfl *)&poi->redirect_tcslfl;
        struct ehca_shca *shca =
                container_of(ibdev, struct ehca_shca, ib_device);
        struct ehca_sport *sport = &shca->sport[port_num - 1];
@@ -158,10 +174,29 @@ static int ehca_process_perf(struct ib_device *ibdev, u8 port_num,
                poi->base_version = 1;
                poi->class_version = 1;
                poi->resp_time_value = 18;
-               poi->redirect_lid = sport->saved_attr.lid;
-               poi->redirect_qp = sport->pma_qp_nr;
+
+               /* copy local routing information from WC where applicable */
+               tcslfl->sl         = in_wc->sl;
+               poi->redirect_lid  =
+                       sport->saved_attr.lid | in_wc->dlid_path_bits;
+               poi->redirect_qp   = sport->pma_qp_nr;
                poi->redirect_qkey = IB_QP1_QKEY;
-               poi->redirect_pkey = IB_DEFAULT_PKEY_FULL;
+
+               ehca_query_pkey(ibdev, port_num, in_wc->pkey_index,
+                               &poi->redirect_pkey);
+
+               /* if request was globally routed, copy route info */
+               if (in_grh) {
+                       struct vertcfl *vertcfl =
+                               (struct vertcfl *)&in_grh->version_tclass_flow;
+                       memcpy(poi->redirect_gid, in_grh->dgid.raw,
+                              sizeof(poi->redirect_gid));
+                       tcslfl->tc        = vertcfl->tc;
+                       tcslfl->fl        = vertcfl->fl;
+               } else
+                       /* else only fill in default GID */
+                       ehca_query_gid(ibdev, port_num, 0,
+                                      (union ib_gid *)&poi->redirect_gid);
 
                ehca_dbg(ibdev, "ehca_pma_lid=%x ehca_pma_qp=%x",
                         sport->saved_attr.lid, sport->pma_qp_nr);
@@ -183,8 +218,7 @@ perf_reply:
 
 int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                     struct ib_wc *in_wc, struct ib_grh *in_grh,
-                    struct ib_mad *in_mad,
-                    struct ib_mad *out_mad)
+                    struct ib_mad *in_mad, struct ib_mad *out_mad)
 {
        int ret;
 
@@ -196,7 +230,8 @@ int ehca_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
                return IB_MAD_RESULT_SUCCESS;
 
        ehca_dbg(ibdev, "port_num=%x src_qp=%x", port_num, in_wc->src_qp);
-       ret = ehca_process_perf(ibdev, port_num, in_mad, out_mad);
+       ret = ehca_process_perf(ibdev, port_num, in_wc, in_grh,
+                               in_mad, out_mad);
 
        return ret;
 }
index 23173982b32c1c636fdc2bbc398ad1168e3ff10d..38a287006612c055b3a1faa32800099041257c35 100644 (file)
@@ -1616,7 +1616,7 @@ static int try_alloc_port(struct ipath_devdata *dd, int port,
                pd->port_cnt = 1;
                port_fp(fp) = pd;
                pd->port_pid = get_pid(task_pid(current));
-               strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
+               strlcpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
                ipath_stats.sps_ports++;
                ret = 0;
        } else
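strncpy() leaves the destination unterminated whenever the source is at least as long as the buffer, so a later read of port_comm as a string could run past the array; strlcpy() always writes a trailing NUL, truncating if necessary. A small sketch:

    #include <linux/sched.h>
    #include <linux/string.h>

    static void save_task_name(char *dst, size_t len)
    {
            /* current->comm can use all TASK_COMM_LEN bytes; strlcpy()
             * guarantees dst is NUL-terminated, strncpy() does not. */
            strlcpy(dst, current->comm, len);
    }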
index 16a702d460184f8f66e2434b720aa9842a145701..ceb98ee7866646d87ecdc6b8351622a11441facd 100644 (file)
@@ -60,7 +60,7 @@ static int recv_subn_get_nodedescription(struct ib_smp *smp,
        if (smp->attr_mod)
                smp->status |= IB_SMP_INVALID_FIELD;
 
-       strncpy(smp->data, ibdev->node_desc, sizeof(smp->data));
+       memcpy(smp->data, ibdev->node_desc, sizeof(smp->data));
 
        return reply(smp);
 }
index ae3d7590346e850c0f5579bea15390c91e8b4764..3cb3f47a10b85753cded5f80f019517bf81e3635 100644 (file)
@@ -342,6 +342,9 @@ static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;
 
+       if (!dev->ib_active)
+               return ERR_PTR(-EAGAIN);
+
        resp.qp_tab_size      = dev->dev->caps.num_qps;
        resp.bf_reg_size      = dev->dev->caps.bf_reg_size;
        resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;
@@ -540,15 +543,11 @@ static struct device_attribute *mlx4_class_attributes[] = {
 
 static void *mlx4_ib_add(struct mlx4_dev *dev)
 {
-       static int mlx4_ib_version_printed;
        struct mlx4_ib_dev *ibdev;
        int num_ports = 0;
        int i;
 
-       if (!mlx4_ib_version_printed) {
-               printk(KERN_INFO "%s", mlx4_ib_version);
-               ++mlx4_ib_version_printed;
-       }
+       printk_once(KERN_INFO "%s", mlx4_ib_version);
 
        mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_IB)
                num_ports++;
@@ -673,6 +672,8 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)
                        goto err_reg;
        }
 
+       ibdev->ib_active = true;
+
        return ibdev;
 
 err_reg:
@@ -729,6 +730,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                break;
 
        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
+               ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                break;
 
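printk_once() replaces the hand-rolled static counter: the macro wraps printk() in a static boolean guard, so the version banner appears once no matter how many devices probe. The new ib_active flag is the other half of this file's changes: mlx4_ib_alloc_ucontext() now refuses new userspace contexts with -EAGAIN after a catastrophic error. A tiny sketch of the printk_once pattern:

    #include <linux/kernel.h>

    static int example_probe(void)
    {
            /* Emitted on the first call only; later probes skip it. */
            printk_once(KERN_INFO "example: driver v1.0 loaded\n");
            return 0;
    }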
index 8a7dd6795fa0a2117f3f21311c9a1a4eccdf4c9c..3486d7675e56dfece310b090abf0eec93d140ec6 100644 (file)
@@ -175,6 +175,7 @@ struct mlx4_ib_dev {
        spinlock_t              sm_lock;
 
        struct mutex            cap_mask_mutex;
+       bool                    ib_active;
 };
 
 static inline struct mlx4_ib_dev *to_mdev(struct ib_device *ibdev)
index c4a02648c8afe78ce6553e95fcc0742ed499274b..219b10397b4d0fd64f19767adbdb3e24352f1476 100644 (file)
@@ -615,10 +615,12 @@ static enum mlx4_qp_state to_mlx4_state(enum ib_qp_state state)
 }
 
 static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
-       else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+               __acquire(&recv_cq->lock);
+       } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
@@ -628,10 +630,12 @@ static void mlx4_ib_lock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv
 }
 
 static void mlx4_ib_unlock_cqs(struct mlx4_ib_cq *send_cq, struct mlx4_ib_cq *recv_cq)
+       __releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
+               __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
-       else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
+       } else if (send_cq->mcq.cqn < recv_cq->mcq.cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
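__acquires()/__releases() on the functions and the no-op __acquire()/__release() in the bodies are sparse annotations: when send_cq and recv_cq are the same CQ only one real lock operation executes, and the dummy annotation keeps sparse's acquire/release counting balanced. A minimal sketch (the cqn-based ordering that avoids deadlock is simplified here):

    #include <linux/spinlock.h>
    #include <linux/lockdep.h>

    static void lock_pair(spinlock_t *a, spinlock_t *b)
            __acquires(a) __acquires(b)
    {
            if (a == b) {
                    spin_lock(a);
                    __acquire(b);   /* annotation only, compiles to nothing */
            } else {
                    spin_lock(a);
                    spin_lock_nested(b, SINGLE_DEPTH_NESTING);
            }
    }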
index 65ad359fdf164e506ec9a5726ba12c5a5ee882f2..056b2a4c69700f9fdb7cb6640213804e5f850aec 100644 (file)
@@ -88,6 +88,7 @@ static void handle_catas(struct mthca_dev *dev)
        event.device = &dev->ib_dev;
        event.event  = IB_EVENT_DEVICE_FATAL;
        event.element.port_num = 0;
+       dev->active = false;
 
        ib_dispatch_event(&event);
 
index 75671f75cac482bf67ce0a6c3692634cce7f43b5..155bc66395beaeaf5aa1476b884cb12c527715a5 100644 (file)
@@ -34,8 +34,6 @@
 #ifndef MTHCA_CONFIG_REG_H
 #define MTHCA_CONFIG_REG_H
 
-#include <asm/page.h>
-
 #define MTHCA_HCR_BASE         0x80680
 #define MTHCA_HCR_SIZE         0x0001c
 #define MTHCA_ECR_BASE         0x80700
index 9ef611f6dd36d52f531198d0a61d7ec11dbe78fd..7e6a6d64ad4eb1bee96b0d2d244daf20898ab3b6 100644 (file)
@@ -357,6 +357,7 @@ struct mthca_dev {
        struct ib_ah         *sm_ah[MTHCA_MAX_PORTS];
        spinlock_t            sm_lock;
        u8                    rate[MTHCA_MAX_PORTS];
+       bool                  active;
 };
 
 #ifdef CONFIG_INFINIBAND_MTHCA_DEBUG
index 90e4e450a12022d4186ca19f1c94c9e897e5c785..8c31fa36e95e7102a96ecf9ca6b31988b0f99640 100644 (file)
@@ -829,27 +829,34 @@ int mthca_init_eq_table(struct mthca_dev *dev)
 
        if (dev->mthca_flags & MTHCA_FLAG_MSI_X) {
                static const char *eq_name[] = {
-                       [MTHCA_EQ_COMP]  = DRV_NAME " (comp)",
-                       [MTHCA_EQ_ASYNC] = DRV_NAME " (async)",
-                       [MTHCA_EQ_CMD]   = DRV_NAME " (cmd)"
+                       [MTHCA_EQ_COMP]  = DRV_NAME "-comp",
+                       [MTHCA_EQ_ASYNC] = DRV_NAME "-async",
+                       [MTHCA_EQ_CMD]   = DRV_NAME "-cmd"
                };
 
                for (i = 0; i < MTHCA_NUM_EQ; ++i) {
+                       snprintf(dev->eq_table.eq[i].irq_name,
+                                IB_DEVICE_NAME_MAX,
+                                "%s@pci:%s", eq_name[i],
+                                pci_name(dev->pdev));
                        err = request_irq(dev->eq_table.eq[i].msi_x_vector,
                                          mthca_is_memfree(dev) ?
                                          mthca_arbel_msi_x_interrupt :
                                          mthca_tavor_msi_x_interrupt,
-                                         0, eq_name[i], dev->eq_table.eq + i);
+                                         0, dev->eq_table.eq[i].irq_name,
+                                         dev->eq_table.eq + i);
                        if (err)
                                goto err_out_cmd;
                        dev->eq_table.eq[i].have_irq = 1;
                }
        } else {
+               snprintf(dev->eq_table.eq[0].irq_name, IB_DEVICE_NAME_MAX,
+                        DRV_NAME "@pci:%s", pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq,
                                  mthca_is_memfree(dev) ?
                                  mthca_arbel_interrupt :
                                  mthca_tavor_interrupt,
-                                 IRQF_SHARED, DRV_NAME, dev);
+                                 IRQF_SHARED, dev->eq_table.eq[0].irq_name, dev);
                if (err)
                        goto err_out_cmd;
                dev->eq_table.have_irq = 1;
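Building the IRQ name per device, instead of passing a shared string literal, lets /proc/interrupts tell multiple HCAs apart; the buffer must outlive the request_irq() call, hence the new irq_name field in struct mthca_eq (added in the mthca_provider.h hunk below). Condensed shape of the pattern, assuming the fields shown in this diff:

    snprintf(eq->irq_name, IB_DEVICE_NAME_MAX, DRV_NAME "-comp@pci:%s",
             pci_name(dev->pdev));
    err = request_irq(eq->msi_x_vector, handler, 0, eq->irq_name, eq);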
index 13da9f1d24c0a2bf9d715f32701d0cb02406cba2..b01b28987874e9145d241310347e4691695c2716 100644 (file)
@@ -1116,6 +1116,8 @@ static int __mthca_init_one(struct pci_dev *pdev, int hca_type)
        pci_set_drvdata(pdev, mdev);
        mdev->hca_type = hca_type;
 
+       mdev->active = true;
+
        return 0;
 
 err_unregister:
@@ -1215,15 +1217,11 @@ int __mthca_restart_one(struct pci_dev *pdev)
 static int __devinit mthca_init_one(struct pci_dev *pdev,
                                    const struct pci_device_id *id)
 {
-       static int mthca_version_printed = 0;
        int ret;
 
        mutex_lock(&mthca_device_mutex);
 
-       if (!mthca_version_printed) {
-               printk(KERN_INFO "%s", mthca_version);
-               ++mthca_version_printed;
-       }
+       printk_once(KERN_INFO "%s", mthca_version);
 
        if (id->driver_data >= ARRAY_SIZE(mthca_hca_table)) {
                printk(KERN_ERR PFX "%s has invalid driver data %lx\n",
index 87ad889e367b2b6b39cfe010ec1ec28f1dd488ac..bcf7a401482015f3b5c09afdc4177bab3d10577f 100644 (file)
@@ -334,6 +334,9 @@ static struct ib_ucontext *mthca_alloc_ucontext(struct ib_device *ibdev,
        struct mthca_ucontext           *context;
        int                              err;
 
+       if (!(to_mdev(ibdev)->active))
+               return ERR_PTR(-EAGAIN);
+
        memset(&uresp, 0, sizeof uresp);
 
        uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;
index c621f8794b8820d11e6293d62c2ce35d11e8e7ee..90f4c4d2e98359f5b808643b32e7bb6e8a959201 100644 (file)
@@ -113,6 +113,7 @@ struct mthca_eq {
        int                    nent;
        struct mthca_buf_list *page_list;
        struct mthca_mr        mr;
+       char                   irq_name[IB_DEVICE_NAME_MAX];
 };
 
 struct mthca_av;
index f5081bfde6db19641ce62cb3638139caebb6f880..c10576fa60c112931650ab8c322b32dab5f478f9 100644 (file)
@@ -1319,10 +1319,12 @@ int mthca_alloc_qp(struct mthca_dev *dev,
 }
 
 static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+       __acquires(&send_cq->lock) __acquires(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
                spin_lock_irq(&send_cq->lock);
-       else if (send_cq->cqn < recv_cq->cqn) {
+               __acquire(&recv_cq->lock);
+       } else if (send_cq->cqn < recv_cq->cqn) {
                spin_lock_irq(&send_cq->lock);
                spin_lock_nested(&recv_cq->lock, SINGLE_DEPTH_NESTING);
        } else {
@@ -1332,10 +1334,12 @@ static void mthca_lock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
 }
 
 static void mthca_unlock_cqs(struct mthca_cq *send_cq, struct mthca_cq *recv_cq)
+       __releases(&send_cq->lock) __releases(&recv_cq->lock)
 {
-       if (send_cq == recv_cq)
+       if (send_cq == recv_cq) {
+               __release(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
-       else if (send_cq->cqn < recv_cq->cqn) {
+       } else if (send_cq->cqn < recv_cq->cqn) {
                spin_unlock(&recv_cq->lock);
                spin_unlock_irq(&send_cq->lock);
        } else {
index acb6817f6060615a4db753e3b4dde6c7f3dd31bb..2a13a163d33780ce9d646aba280a0ceeb76ff9ca 100644 (file)
@@ -30,7 +30,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
index bf1720f7f35fe6e07d80321bff8e1de42a834256..bcc6abc4faffafb9fba5b3076644b568434ecc98 100644 (file)
@@ -523,7 +523,7 @@ int nes_cm_disconn(struct nes_qp *);
 void nes_cm_disconn_worker(void *);
 
 /* nes_verbs.c */
-int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32);
+int nes_hw_modify_qp(struct nes_device *, struct nes_qp *, u32, u32, u32);
 int nes_modify_qp(struct ib_qp *, struct ib_qp_attr *, int, struct ib_udata *);
 struct nes_ib_device *nes_init_ofa_device(struct net_device *);
 void nes_destroy_ofa_device(struct nes_ib_device *);
index 114b802771ada144c5a4df3fc25407e2acb34156..73473db1986361f2a390a30b09c942f74e62ca96 100644 (file)
@@ -2450,19 +2450,16 @@ static int nes_cm_init_tsa_conn(struct nes_qp *nesqp, struct nes_cm_node *cm_nod
  */
 int nes_cm_disconn(struct nes_qp *nesqp)
 {
-       unsigned long flags;
-
-       spin_lock_irqsave(&nesqp->lock, flags);
-       if (nesqp->disconn_pending == 0) {
-               nesqp->disconn_pending++;
-               spin_unlock_irqrestore(&nesqp->lock, flags);
-               /* init our disconnect work element, to */
-               INIT_WORK(&nesqp->disconn_work, nes_disconnect_worker);
+       struct disconn_work *work;
 
-               queue_work(g_cm_core->disconn_wq, &nesqp->disconn_work);
-       } else
-               spin_unlock_irqrestore(&nesqp->lock, flags);
+       work = kzalloc(sizeof *work, GFP_ATOMIC);
+       if (!work)
+               return -ENOMEM; /* Timer will clean up */
 
+       nes_add_ref(&nesqp->ibqp);
+       work->nesqp = nesqp;
+       INIT_WORK(&work->work, nes_disconnect_worker);
+       queue_work(g_cm_core->disconn_wq, &work->work);
        return 0;
 }
 
@@ -2472,11 +2469,14 @@ int nes_cm_disconn(struct nes_qp *nesqp)
  */
 static void nes_disconnect_worker(struct work_struct *work)
 {
-       struct nes_qp *nesqp = container_of(work, struct nes_qp, disconn_work);
+       struct disconn_work *dwork = container_of(work, struct disconn_work, work);
+       struct nes_qp *nesqp = dwork->nesqp;
 
+       kfree(dwork);
        nes_debug(NES_DBG_CM, "processing AEQE id 0x%04X for QP%u.\n",
                        nesqp->last_aeq, nesqp->hwqp.qp_id);
        nes_cm_disconn_true(nesqp);
+       nes_rem_ref(&nesqp->ibqp);
 }
 
 
@@ -2493,7 +2493,12 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
        u16 last_ae;
        u8 original_hw_tcp_state;
        u8 original_ibqp_state;
-       u8 issued_disconnect_reset = 0;
+       enum iw_cm_event_type disconn_status = IW_CM_EVENT_STATUS_OK;
+       int issue_disconn = 0;
+       int issue_close = 0;
+       int issue_flush = 0;
+       u32 flush_q = NES_CQP_FLUSH_RQ;
+       struct ib_event ibevent;
 
        if (!nesqp) {
                nes_debug(NES_DBG_CM, "disconnect_worker nesqp is NULL\n");
@@ -2517,24 +2522,55 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
        original_ibqp_state   = nesqp->ibqp_state;
        last_ae = nesqp->last_aeq;
 
+       if (nesqp->term_flags) {
+               issue_disconn = 1;
+               issue_close = 1;
+               nesqp->cm_id = NULL;
+               if (nesqp->flush_issued == 0) {
+                       nesqp->flush_issued = 1;
+                       issue_flush = 1;
+               }
+       } else if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
+                       ((original_ibqp_state == IB_QPS_RTS) &&
+                       (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               issue_disconn = 1;
+               if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET)
+                       disconn_status = IW_CM_EVENT_STATUS_RESET;
+       }
+
+       if (((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
+                (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
+                (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
+                (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               issue_close = 1;
+               nesqp->cm_id = NULL;
+               if (nesqp->flush_issued == 0) {
+                       nesqp->flush_issued = 1;
+                       issue_flush = 1;
+               }
+       }
+
+       spin_unlock_irqrestore(&nesqp->lock, flags);
 
-       nes_debug(NES_DBG_CM, "set ibqp_state=%u\n", nesqp->ibqp_state);
+       if ((issue_flush) && (nesqp->destroyed == 0)) {
+               /* Flush the queue(s) */
+               if (nesqp->hw_iwarp_state >= NES_AEQE_IWARP_STATE_TERMINATE)
+                       flush_q |= NES_CQP_FLUSH_SQ;
+               flush_wqes(nesvnic->nesdev, nesqp, flush_q, 1);
 
-       if ((nesqp->cm_id) && (cm_id->event_handler)) {
-               if ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
-                               ((original_ibqp_state == IB_QPS_RTS) &&
-                               (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               if (nesqp->term_flags) {
+                       ibevent.device = nesqp->ibqp.device;
+                       ibevent.event = nesqp->terminate_eventtype;
+                       ibevent.element.qp = &nesqp->ibqp;
+                       nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+               }
+       }
+
+       if ((cm_id) && (cm_id->event_handler)) {
+               if (issue_disconn) {
                        atomic_inc(&cm_disconnects);
                        cm_event.event = IW_CM_EVENT_DISCONNECT;
-                       if (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET) {
-                               cm_event.status = IW_CM_EVENT_STATUS_RESET;
-                               nes_debug(NES_DBG_CM, "Generating a CM "
-                                       "Disconnect Event (status reset) for "
-                                       "QP%u, cm_id = %p. \n",
-                                       nesqp->hwqp.qp_id, cm_id);
-                       } else
-                               cm_event.status = IW_CM_EVENT_STATUS_OK;
-
+                       cm_event.status = disconn_status;
                        cm_event.local_addr = cm_id->local_addr;
                        cm_event.remote_addr = cm_id->remote_addr;
                        cm_event.private_data = NULL;
@@ -2547,29 +2583,14 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                                nesqp->hwqp.sq_tail, cm_id,
                                atomic_read(&nesqp->refcount));
 
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
                        ret = cm_id->event_handler(cm_id, &cm_event);
                        if (ret)
                                nes_debug(NES_DBG_CM, "OFA CM event_handler "
                                        "returned, ret=%d\n", ret);
-                       spin_lock_irqsave(&nesqp->lock, flags);
                }
 
-               nesqp->disconn_pending = 0;
-               /* There might have been another AE while the lock was released */
-               original_hw_tcp_state = nesqp->hw_tcp_state;
-               original_ibqp_state   = nesqp->ibqp_state;
-               last_ae = nesqp->last_aeq;
-
-               if ((issued_disconnect_reset == 0) && (nesqp->cm_id) &&
-                               ((original_hw_tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
-                                (original_hw_tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT) ||
-                                (last_ae == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) ||
-                                (last_ae == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+               if (issue_close) {
                        atomic_inc(&cm_closes);
-                       nesqp->cm_id = NULL;
-                       nesqp->in_disconnect = 0;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
                        nes_disconnect(nesqp, 1);
 
                        cm_id->provider_data = nesqp;
@@ -2588,28 +2609,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
                        }
 
                        cm_id->rem_ref(cm_id);
-
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       if (nesqp->flush_issued == 0) {
-                               nesqp->flush_issued = 1;
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               flush_wqes(nesvnic->nesdev, nesqp,
-                                       NES_CQP_FLUSH_RQ, 1);
-                       } else
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-               } else {
-                       cm_id = nesqp->cm_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       /* check to see if the inbound reset beat the outbound reset */
-                       if ((!cm_id) && (last_ae==NES_AEQE_AEID_RESET_SENT)) {
-                               nes_debug(NES_DBG_CM, "QP%u: Decing refcount "
-                                       "due to inbound reset beating the "
-                                       "outbound reset.\n", nesqp->hwqp.qp_id);
-                       }
                }
-       } else {
-               nesqp->disconn_pending = 0;
-               spin_unlock_irqrestore(&nesqp->lock, flags);
        }
 
        return 0;
index 8b7e7c0e496ecc7c2055c9845db3a0e746e8e3b9..90e8e4d8a5cef8522039c252f300530da6a15957 100644 (file)
@@ -410,8 +410,6 @@ struct nes_cm_ops {
 int schedule_nes_timer(struct nes_cm_node *, struct sk_buff *,
                enum nes_timer_type, int, int);
 
-int nes_cm_disconn(struct nes_qp *);
-
 int nes_accept(struct iw_cm_id *, struct iw_cm_conn_param *);
 int nes_reject(struct iw_cm_id *, const void *, u8);
 int nes_connect(struct iw_cm_id *, struct iw_cm_conn_param *);
index 4a84d02ece0637fcbd3da54aed4131e6fbb45c4b..63a1a8e1e8a3d2f28631d7674188657deb2bf1f4 100644 (file)
@@ -74,6 +74,8 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 static void process_critical_error(struct nes_device *nesdev);
 static void nes_process_mac_intr(struct nes_device *nesdev, u32 mac_number);
 static unsigned int nes_reset_adapter_ne020(struct nes_device *nesdev, u8 *OneG_Mode);
+static void nes_terminate_timeout(unsigned long context);
+static void nes_terminate_start_timer(struct nes_qp *nesqp);
 
 #ifdef CONFIG_INFINIBAND_NES_DEBUG
 static unsigned char *nes_iwarp_state_str[] = {
@@ -2903,6 +2905,417 @@ static void nes_cqp_ce_handler(struct nes_device *nesdev, struct nes_hw_cq *cq)
 }
 
 
+static u8 *locate_mpa(u8 *pkt, u32 aeq_info)
+{
+       u16 pkt_len;
+
+       if (aeq_info & NES_AEQE_Q2_DATA_ETHERNET) {
+               /* skip over ethernet header */
+               pkt_len = be16_to_cpu(*(u16 *)(pkt + ETH_HLEN - 2));
+               pkt += ETH_HLEN;
+
+               /* Skip over IP and TCP headers */
+               pkt += 4 * (pkt[0] & 0x0f);
+               pkt += 4 * ((pkt[12] >> 4) & 0x0f);
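+               /* Annotation: e.g. 0x45 in the IP version/IHL byte gives
+                * 4 * 5 = 20 bytes, and 0x50 in the TCP data-offset byte
+                * likewise skips 20 bytes. */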
+       }
+       return pkt;
+}
+
+/* Determine the iWARP opcode of an incoming error packet */
+static u32 iwarp_opcode(struct nes_qp *nesqp, u32 aeq_info)
+{
+       u8 *pkt;
+       u16 *mpa;
+       u32 opcode = 0xffffffff;
+
+       if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+               pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+               mpa = (u16 *)locate_mpa(pkt, aeq_info);
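+               /* mpa[1] covers the DDP and RDMAP control bytes; the opcode
+                * sits in the low nibble of the RDMAP control byte (annotation) */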
+               opcode = be16_to_cpu(mpa[1]) & 0xf;
+       }
+
+       return opcode;
+}
+
+/* Build iWARP terminate header */
+static int nes_bld_terminate_hdr(struct nes_qp *nesqp, u16 async_event_id, u32 aeq_info)
+{
+       u8 *pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+       u16 ddp_seg_len;
+       int copy_len = 0;
+       u8 is_tagged = 0;
+       u8 flush_code = 0;
+       struct nes_terminate_hdr *termhdr;
+
+       termhdr = (struct nes_terminate_hdr *)nesqp->hwqp.q2_vbase;
+       memset(termhdr, 0, 64);
+
+       if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+
+               /* Use data from offending packet to fill in ddp & rdma hdrs */
+               pkt = locate_mpa(pkt, aeq_info);
+               ddp_seg_len = be16_to_cpu(*(u16 *)pkt);
+               if (ddp_seg_len) {
+                       copy_len = 2;
+                       termhdr->hdrct = DDP_LEN_FLAG;
+                       if (pkt[2] & 0x80) {
+                               is_tagged = 1;
+                               if (ddp_seg_len >= TERM_DDP_LEN_TAGGED) {
+                                       copy_len += TERM_DDP_LEN_TAGGED;
+                                       termhdr->hdrct |= DDP_HDR_FLAG;
+                               }
+                       } else {
+                               if (ddp_seg_len >= TERM_DDP_LEN_UNTAGGED) {
+                                       copy_len += TERM_DDP_LEN_UNTAGGED;
+                                       termhdr->hdrct |= DDP_HDR_FLAG;
+                               }
+
+                               if (ddp_seg_len >= (TERM_DDP_LEN_UNTAGGED + TERM_RDMA_LEN)) {
+                                       if ((pkt[3] & RDMA_OPCODE_MASK) == RDMA_READ_REQ_OPCODE) {
+                                               copy_len += TERM_RDMA_LEN;
+                                               termhdr->hdrct |= RDMA_HDR_FLAG;
+                                       }
+                               }
+                       }
+               }
+       }
+
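+       /* Annotation: layer_etype packs the layer in the high nibble and the
+        * error type in the low nibble, e.g. (LAYER_DDP << 4) | DDP_TAGGED_BUFFER
+        * yields 0x11 for a tagged-buffer DDP error. */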
+       switch (async_event_id) {
+       case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
+               switch (iwarp_opcode(nesqp, aeq_info)) {
+               case IWARP_OPCODE_WRITE:
+                       flush_code = IB_WC_LOC_PROT_ERR;
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_INV_STAG;
+                       break;
+               default:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_INV_STAG;
+               }
+               break;
+       case NES_AEQE_AEID_AMP_INVALID_STAG:
+               flush_code = IB_WC_REM_ACCESS_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+               termhdr->error_code = RDMAP_INV_STAG;
+               break;
+       case NES_AEQE_AEID_AMP_BAD_QP:
+               flush_code = IB_WC_LOC_QP_OP_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_QN;
+               break;
+       case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+       case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
+               switch (iwarp_opcode(nesqp, aeq_info)) {
+               case IWARP_OPCODE_SEND_INV:
+               case IWARP_OPCODE_SEND_SE_INV:
+                       flush_code = IB_WC_REM_OP_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+                       termhdr->error_code = RDMAP_CANT_INV_STAG;
+                       break;
+               default:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_INV_STAG;
+               }
+               break;
+       case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+               if (aeq_info & (NES_AEQE_Q2_DATA_ETHERNET | NES_AEQE_Q2_DATA_MPA)) {
+                       flush_code = IB_WC_LOC_PROT_ERR;
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_BOUNDS;
+               } else {
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_INV_BOUNDS;
+               }
+               break;
+       case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+       case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
+       case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
+               flush_code = IB_WC_REM_ACCESS_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+               termhdr->error_code = RDMAP_ACCESS;
+               break;
+       case NES_AEQE_AEID_AMP_TO_WRAP:
+               flush_code = IB_WC_REM_ACCESS_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+               termhdr->error_code = RDMAP_TO_WRAP;
+               break;
+       case NES_AEQE_AEID_AMP_BAD_PD:
+               switch (iwarp_opcode(nesqp, aeq_info)) {
+               case IWARP_OPCODE_WRITE:
+                       flush_code = IB_WC_LOC_PROT_ERR;
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_UNASSOC_STAG;
+                       break;
+               case IWARP_OPCODE_SEND_INV:
+               case IWARP_OPCODE_SEND_SE_INV:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_CANT_INV_STAG;
+                       break;
+               default:
+                       flush_code = IB_WC_REM_ACCESS_ERR;
+                       termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_PROT;
+                       termhdr->error_code = RDMAP_UNASSOC_STAG;
+               }
+               break;
+       case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+               flush_code = IB_WC_LOC_LEN_ERR;
+               termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+               termhdr->error_code = MPA_MARKER;
+               break;
+       case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_MPA << 4) | DDP_LLP;
+               termhdr->error_code = MPA_CRC;
+               break;
+       case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+       case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+               flush_code = IB_WC_LOC_LEN_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+               termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+               break;
+       case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+       case NES_AEQE_AEID_DDP_NO_L_BIT:
+               flush_code = IB_WC_FATAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_CATASTROPHIC;
+               termhdr->error_code = DDP_CATASTROPHIC_LOCAL;
+               break;
+       case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+       case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_MSN_RANGE;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+               flush_code = IB_WC_LOC_LEN_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_TOO_LONG;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+               flush_code = IB_WC_GENERAL_ERR;
+               if (is_tagged) {
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_TAGGED_BUFFER;
+                       termhdr->error_code = DDP_TAGGED_INV_DDP_VER;
+               } else {
+                       termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+                       termhdr->error_code = DDP_UNTAGGED_INV_DDP_VER;
+               }
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_MO;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+               flush_code = IB_WC_REM_OP_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_MSN_NO_BUF;
+               break;
+       case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_DDP << 4) | DDP_UNTAGGED_BUFFER;
+               termhdr->error_code = DDP_UNTAGGED_INV_QN;
+               break;
+       case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+               flush_code = IB_WC_GENERAL_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+               termhdr->error_code = RDMAP_INV_RDMAP_VER;
+               break;
+       case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+               flush_code = IB_WC_LOC_QP_OP_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+               termhdr->error_code = RDMAP_UNEXPECTED_OP;
+               break;
+       default:
+               flush_code = IB_WC_FATAL_ERR;
+               termhdr->layer_etype = (LAYER_RDMA << 4) | RDMAP_REMOTE_OP;
+               termhdr->error_code = RDMAP_UNSPECIFIED;
+               break;
+       }
+
+       if (copy_len)
+               memcpy(termhdr + 1, pkt, copy_len);
+
+       if ((flush_code) && ((NES_AEQE_INBOUND_RDMA & aeq_info) == 0)) {
+               if (aeq_info & NES_AEQE_SQ)
+                       nesqp->term_sq_flush_code = flush_code;
+               else
+                       nesqp->term_rq_flush_code = flush_code;
+       }
+
+       return sizeof(struct nes_terminate_hdr) + copy_len;
+}
+
+static void nes_terminate_connection(struct nes_device *nesdev, struct nes_qp *nesqp,
+                struct nes_hw_aeqe *aeqe, enum ib_event_type eventtype)
+{
+       u64 context;
+       unsigned long flags;
+       u32 aeq_info;
+       u16 async_event_id;
+       u8 tcp_state;
+       u8 iwarp_state;
+       u32 termlen = 0;
+       u32 mod_qp_flags = NES_CQP_QP_IWARP_STATE_TERMINATE |
+                          NES_CQP_QP_TERM_DONT_SEND_FIN;
+       struct nes_adapter *nesadapter = nesdev->nesadapter;
+
+       if (nesqp->term_flags & NES_TERM_SENT)
+               return; /* Sanity check */
+
+       aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+       tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+       iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+       async_event_id = (u16)aeq_info;
+
+       context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
+               aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
+       if (!context) {
+               WARN_ON(!context);
+               return;
+       }
+
+       nesqp = (struct nes_qp *)(unsigned long)context;
+       spin_lock_irqsave(&nesqp->lock, flags);
+       nesqp->hw_iwarp_state = iwarp_state;
+       nesqp->hw_tcp_state = tcp_state;
+       nesqp->last_aeq = async_event_id;
+       nesqp->terminate_eventtype = eventtype;
+       spin_unlock_irqrestore(&nesqp->lock, flags);
+
+       if (nesadapter->send_term_ok)
+               termlen = nes_bld_terminate_hdr(nesqp, async_event_id, aeq_info);
+       else
+               mod_qp_flags |= NES_CQP_QP_TERM_DONT_SEND_TERM_MSG;
+
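+       /* The FIN is deferred: NES_CQP_QP_TERM_DONT_SEND_FIN holds it back until
+        * the TERMINATE_SENT AE arrives and nes_terminate_send_fin() runs. */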
+       nes_terminate_start_timer(nesqp);
+       nesqp->term_flags |= NES_TERM_SENT;
+       nes_hw_modify_qp(nesdev, nesqp, mod_qp_flags, termlen, 0);
+}
+
+static void nes_terminate_send_fin(struct nes_device *nesdev,
+                         struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+       u32 aeq_info;
+       u16 async_event_id;
+       u8 tcp_state;
+       u8 iwarp_state;
+       unsigned long flags;
+
+       aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+       tcp_state = (aeq_info & NES_AEQE_TCP_STATE_MASK) >> NES_AEQE_TCP_STATE_SHIFT;
+       iwarp_state = (aeq_info & NES_AEQE_IWARP_STATE_MASK) >> NES_AEQE_IWARP_STATE_SHIFT;
+       async_event_id = (u16)aeq_info;
+
+       spin_lock_irqsave(&nesqp->lock, flags);
+       nesqp->hw_iwarp_state = iwarp_state;
+       nesqp->hw_tcp_state = tcp_state;
+       nesqp->last_aeq = async_event_id;
+       spin_unlock_irqrestore(&nesqp->lock, flags);
+
+       /* Send the FIN only */
+       nes_hw_modify_qp(nesdev, nesqp, NES_CQP_QP_IWARP_STATE_TERMINATE |
+               NES_CQP_QP_TERM_DONT_SEND_TERM_MSG, 0, 0);
+}
+
+/* Clean up after a terminate is sent or received */
+static void nes_terminate_done(struct nes_qp *nesqp, int timeout_occurred)
+{
+       u32 next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
+       unsigned long flags;
+       struct nes_vnic *nesvnic = to_nesvnic(nesqp->ibqp.device);
+       struct nes_device *nesdev = nesvnic->nesdev;
+       u8 first_time = 0;
+
+       spin_lock_irqsave(&nesqp->lock, flags);
+       if (nesqp->hte_added) {
+               nesqp->hte_added = 0;
+               next_iwarp_state |= NES_CQP_QP_DEL_HTE;
+       }
+
+       first_time = (nesqp->term_flags & NES_TERM_DONE) == 0;
+       nesqp->term_flags |= NES_TERM_DONE;
+       spin_unlock_irqrestore(&nesqp->lock, flags);
+
+       /* Make sure we go through this only once */
+       if (first_time) {
+               if (timeout_occurred == 0)
+                       del_timer(&nesqp->terminate_timer);
+               else
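+                       /* the terminate never completed - force a full QP reset */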
+                       next_iwarp_state |= NES_CQP_QP_RESET;
+
+               nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+               nes_cm_disconn(nesqp);
+       }
+}
+
+static void nes_terminate_received(struct nes_device *nesdev,
+                               struct nes_qp *nesqp, struct nes_hw_aeqe *aeqe)
+{
+       u32 aeq_info;
+       u8 *pkt;
+       u32 *mpa;
+       u8 ddp_ctl;
+       u8 rdma_ctl;
+       u16 aeq_id = 0;
+
+       aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
+       if (aeq_info & NES_AEQE_Q2_DATA_WRITTEN) {
+               /* Terminate is not a performance path, so the silicon
+                * did not validate the frame - do it now */
+               pkt = nesqp->hwqp.q2_vbase + BAD_FRAME_OFFSET;
+               mpa = (u32 *)locate_mpa(pkt, aeq_info);
+               ddp_ctl = (be32_to_cpu(mpa[0]) >> 8) & 0xff;
+               rdma_ctl = be32_to_cpu(mpa[0]) & 0xff;
+               if ((ddp_ctl & 0xc0) != 0x40)
+                       aeq_id = NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC;
+               else if ((ddp_ctl & 0x03) != 1)
+                       aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION;
+               else if (be32_to_cpu(mpa[2]) != 2)
+                       aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_QN;
+               else if (be32_to_cpu(mpa[3]) != 1)
+                       aeq_id = NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN;
+               else if (be32_to_cpu(mpa[4]) != 0)
+                       aeq_id = NES_AEQE_AEID_DDP_UBE_INVALID_MO;
+               else if ((rdma_ctl & 0xc0) != 0x40)
+                       aeq_id = NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION;
+
+               if (aeq_id) {
+                       /* Bad terminate recvd - send back a terminate */
+                       aeq_info = (aeq_info & 0xffff0000) | aeq_id;
+                       aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
+                       nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+                       return;
+               }
+       }
+
+       nesqp->term_flags |= NES_TERM_RCVD;
+       nesqp->terminate_eventtype = IB_EVENT_QP_FATAL;
+       nes_terminate_start_timer(nesqp);
+       nes_terminate_send_fin(nesdev, nesqp, aeqe);
+}
+
+/* Timeout routine in case terminate fails to complete */
+static void nes_terminate_timeout(unsigned long context)
+{
+       struct nes_qp *nesqp = (struct nes_qp *)(unsigned long)context;
+
+       nes_terminate_done(nesqp, 1);
+}
+
+/* Set a one-second timer in case hw cannot complete the terminate sequence */
+static void nes_terminate_start_timer(struct nes_qp *nesqp)
+{
+       init_timer(&nesqp->terminate_timer);
+       nesqp->terminate_timer.function = nes_terminate_timeout;
+       nesqp->terminate_timer.expires = jiffies + HZ;
+       nesqp->terminate_timer.data = (unsigned long)nesqp;
+       add_timer(&nesqp->terminate_timer);
+}
+
 /**
  * nes_process_iwarp_aeqe
  */
@@ -2910,28 +3323,27 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                   struct nes_hw_aeqe *aeqe)
 {
        u64 context;
-       u64 aeqe_context = 0;
        unsigned long flags;
        struct nes_qp *nesqp;
+       struct nes_hw_cq *hw_cq;
+       struct nes_cq *nescq;
        int resource_allocated;
-       /* struct iw_cm_id *cm_id; */
        struct nes_adapter *nesadapter = nesdev->nesadapter;
-       struct ib_event ibevent;
-       /* struct iw_cm_event cm_event; */
        u32 aeq_info;
        u32 next_iwarp_state = 0;
        u16 async_event_id;
        u8 tcp_state;
        u8 iwarp_state;
+       int must_disconn = 1;
+       int must_terminate = 0;
+       struct ib_event ibevent;
 
        nes_debug(NES_DBG_AEQ, "\n");
        aeq_info = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_MISC_IDX]);
-       if ((NES_AEQE_INBOUND_RDMA&aeq_info) || (!(NES_AEQE_QP&aeq_info))) {
+       if ((NES_AEQE_INBOUND_RDMA & aeq_info) || (!(NES_AEQE_QP & aeq_info))) {
                context  = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
                context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
        } else {
-               aeqe_context = le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_LOW_IDX]);
-               aeqe_context += ((u64)le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_CTXT_HIGH_IDX])) << 32;
                context = (unsigned long)nesadapter->qp_table[le32_to_cpu(
                                                aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]) - NES_FIRST_QPN];
                BUG_ON(!context);
@@ -2948,7 +3360,11 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 
        switch (async_event_id) {
                case NES_AEQE_AEID_LLP_FIN_RECEIVED:
-                       nesqp = *((struct nes_qp **)&context);
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+
+                       if (nesqp->term_flags)
+                               return; /* Ignore it, wait for close complete */
+
                        if (atomic_inc_return(&nesqp->close_timer_started) == 1) {
                                nesqp->cm_id->add_ref(nesqp->cm_id);
                                schedule_nes_timer(nesqp->cm_node, (struct sk_buff *)nesqp,
@@ -2959,18 +3375,24 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hwqp.qp_id, atomic_read(&nesqp->refcount),
                                                async_event_id, nesqp->last_aeq, tcp_state);
                        }
+
                        if ((tcp_state != NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
                                        (nesqp->ibqp_state != IB_QPS_RTS)) {
                                /* FIN Received but tcp state or IB state moved on,
                                                should expect a close complete */
                                return;
                        }
+
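+                       /* fall through */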
                case NES_AEQE_AEID_LLP_CLOSE_COMPLETE:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       if (nesqp->term_flags) {
+                               nes_terminate_done(nesqp, 0);
+                               return;
+                       }
+
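+                       /* fall through */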
                case NES_AEQE_AEID_LLP_CONNECTION_RESET:
-               case NES_AEQE_AEID_TERMINATE_SENT:
-               case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
                case NES_AEQE_AEID_RESET_SENT:
-                       nesqp = *((struct nes_qp **)&context);
+                       nesqp = (struct nes_qp *)(unsigned long)context;
                        if (async_event_id == NES_AEQE_AEID_RESET_SENT) {
                                tcp_state = NES_AEQE_TCP_STATE_CLOSED;
                        }
@@ -2982,12 +3404,7 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                        if ((tcp_state == NES_AEQE_TCP_STATE_CLOSED) ||
                                        (tcp_state == NES_AEQE_TCP_STATE_TIME_WAIT)) {
                                nesqp->hte_added = 0;
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u to remove hte\n",
-                                               nesqp->hwqp.qp_id);
-                               nes_hw_modify_qp(nesdev, nesqp,
-                                               NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE, 0);
-                               spin_lock_irqsave(&nesqp->lock, flags);
+                               next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_DEL_HTE;
                        }
 
                        if ((nesqp->ibqp_state == IB_QPS_RTS) &&
@@ -2999,151 +3416,106 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                                                nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_CLOSING;
                                                break;
                                        case NES_AEQE_IWARP_STATE_TERMINATE:
-                                               next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE;
-                                               nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_TERMINATE;
-                                               if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
-                                                       next_iwarp_state |= 0x02000000;
-                                                       nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-                                               }
+                                               must_disconn = 0; /* terminate path takes care of disconn */
+                                               if (nesqp->term_flags == 0)
+                                                       must_terminate = 1;
                                                break;
-                                       default:
-                                               next_iwarp_state = 0;
-                               }
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               if (next_iwarp_state) {
-                                       nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
-                                                       " also added another reference\n",
-                                                       nesqp->hwqp.qp_id, next_iwarp_state);
-                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
                                }
-                               nes_cm_disconn(nesqp);
                        } else {
                                if (async_event_id ==  NES_AEQE_AEID_LLP_FIN_RECEIVED) {
                                        /* FIN Received but ib state not RTS,
                                                        close complete will be on its way */
-                                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                                       return;
-                               }
-                               spin_unlock_irqrestore(&nesqp->lock, flags);
-                               if (async_event_id == NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE) {
-                                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000;
-                                       nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-                                       nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X,"
-                                                       " also added another reference\n",
-                                                       nesqp->hwqp.qp_id, next_iwarp_state);
-                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
+                                       must_disconn = 0;
                                }
-                               nes_cm_disconn(nesqp);
                        }
-                       break;
-               case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
                        spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TERMINATE_RECEIVED"
-                                       " event on QP%u \n  Q2 Data:\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_FATAL;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
-                       if ((tcp_state == NES_AEQE_TCP_STATE_CLOSE_WAIT) ||
-                                       ((nesqp->ibqp_state == IB_QPS_RTS)&&
-                                       (async_event_id == NES_AEQE_AEID_LLP_CONNECTION_RESET))) {
+
+                       if (must_terminate)
+                               nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
+                       else if (must_disconn) {
+                               if (next_iwarp_state) {
+                                       nes_debug(NES_DBG_AEQ, "issuing hw modifyqp for QP%u. next state = 0x%08X\n",
+                                                 nesqp->hwqp.qp_id, next_iwarp_state);
+                                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 0);
+                               }
                                nes_cm_disconn(nesqp);
-                       } else {
-                               nesqp->in_disconnect = 0;
-                               wake_up(&nesqp->kick_waitq);
                        }
                        break;
-               case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = NES_AEQE_IWARP_STATE_ERROR;
-                       nesqp->hw_tcp_state = NES_AEQE_TCP_STATE_CLOSED;
-                       nesqp->last_aeq = async_event_id;
-                       if (nesqp->cm_id) {
-                               nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
-                                               " event on QP%u, remote IP = 0x%08X \n",
-                                               nesqp->hwqp.qp_id,
-                                               ntohl(nesqp->cm_id->remote_addr.sin_addr.s_addr));
-                       } else {
-                               nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_TOO_MANY_RETRIES"
-                                               " event on QP%u \n",
-                                               nesqp->hwqp.qp_id);
-                       }
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR | NES_CQP_QP_RESET;
-                       nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_FATAL;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
+
+               case NES_AEQE_AEID_TERMINATE_SENT:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       nes_terminate_send_fin(nesdev, nesqp, aeqe);
                        break;
-               case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
-                       if (NES_AEQE_INBOUND_RDMA&aeq_info) {
-                               nesqp = nesadapter->qp_table[le32_to_cpu(
-                                               aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
-                       } else {
-                               /* TODO: get the actual WQE and mask off wqe index */
-                               context &= ~((u64)511);
-                               nesqp = *((struct nes_qp **)&context);
-                       }
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_BAD_STAG_INDEX event on QP%u\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_ACCESS_ERR;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
+
+               case NES_AEQE_AEID_LLP_TERMINATE_RECEIVED:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       nes_terminate_received(nesdev, nesqp, aeqe);
                        break;
+
+               case NES_AEQE_AEID_AMP_BAD_STAG_KEY:
+               case NES_AEQE_AEID_AMP_BAD_STAG_INDEX:
                case NES_AEQE_AEID_AMP_UNALLOCATED_STAG:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_AMP_UNALLOCATED_STAG event on QP%u\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_ACCESS_ERR;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
-                       break;
+               case NES_AEQE_AEID_AMP_INVALID_STAG:
+               case NES_AEQE_AEID_AMP_RIGHTS_VIOLATION:
+               case NES_AEQE_AEID_AMP_INVALIDATE_NO_REMOTE_ACCESS_RIGHTS:
                case NES_AEQE_AEID_PRIV_OPERATION_DENIED:
-                       nesqp = nesadapter->qp_table[le32_to_cpu(aeqe->aeqe_words
-                                       [NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_PRIV_OPERATION_DENIED event on QP%u,"
-                                       " nesqp = %p, AE reported %p\n",
-                                       nesqp->hwqp.qp_id, nesqp, *((struct nes_qp **)&context));
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_ACCESS_ERR;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
+               case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
+               case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
+               case NES_AEQE_AEID_AMP_TO_WRAP:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_ACCESS_ERR);
+                       break;
+
+               case NES_AEQE_AEID_LLP_SEGMENT_TOO_LARGE:
+               case NES_AEQE_AEID_LLP_SEGMENT_TOO_SMALL:
+               case NES_AEQE_AEID_DDP_UBE_INVALID_MO:
+               case NES_AEQE_AEID_DDP_UBE_INVALID_QN:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       if (iwarp_opcode(nesqp, aeq_info) > IWARP_OPCODE_TERM) {
+                               aeq_info &= 0xffff0000;
+                               aeq_info |= NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE;
+                               aeqe->aeqe_words[NES_AEQE_MISC_IDX] = cpu_to_le32(aeq_info);
                        }
+
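+                       /* fall through */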
+               case NES_AEQE_AEID_RDMAP_ROE_BAD_LLP_CLOSE:
+               case NES_AEQE_AEID_LLP_TOO_MANY_RETRIES:
+               case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
+               case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
+               case NES_AEQE_AEID_AMP_BAD_QP:
+               case NES_AEQE_AEID_LLP_RECEIVED_MARKER_AND_LENGTH_FIELDS_DONT_MATCH:
+               case NES_AEQE_AEID_DDP_LCE_LOCAL_CATASTROPHIC:
+               case NES_AEQE_AEID_DDP_NO_L_BIT:
+               case NES_AEQE_AEID_DDP_INVALID_MSN_GAP_IN_MSN:
+               case NES_AEQE_AEID_DDP_INVALID_MSN_RANGE_IS_NOT_VALID:
+               case NES_AEQE_AEID_DDP_UBE_INVALID_DDP_VERSION:
+               case NES_AEQE_AEID_RDMAP_ROE_INVALID_RDMAP_VERSION:
+               case NES_AEQE_AEID_RDMAP_ROE_UNEXPECTED_OPCODE:
+               case NES_AEQE_AEID_AMP_BAD_PD:
+               case NES_AEQE_AEID_AMP_FASTREG_SHARED:
+               case NES_AEQE_AEID_AMP_FASTREG_VALID_STAG:
+               case NES_AEQE_AEID_AMP_FASTREG_MW_STAG:
+               case NES_AEQE_AEID_AMP_FASTREG_INVALID_RIGHTS:
+               case NES_AEQE_AEID_AMP_FASTREG_PBL_TABLE_OVERFLOW:
+               case NES_AEQE_AEID_AMP_FASTREG_INVALID_LENGTH:
+               case NES_AEQE_AEID_AMP_INVALIDATE_SHARED:
+               case NES_AEQE_AEID_AMP_INVALIDATE_MR_WITH_BOUND_WINDOWS:
+               case NES_AEQE_AEID_AMP_MWBIND_VALID_STAG:
+               case NES_AEQE_AEID_AMP_MWBIND_OF_MR_STAG:
+               case NES_AEQE_AEID_AMP_MWBIND_TO_ZERO_BASED_STAG:
+               case NES_AEQE_AEID_AMP_MWBIND_TO_MW_STAG:
+               case NES_AEQE_AEID_AMP_MWBIND_INVALID_RIGHTS:
+               case NES_AEQE_AEID_AMP_MWBIND_INVALID_BOUNDS:
+               case NES_AEQE_AEID_AMP_MWBIND_TO_INVALID_PARENT:
+               case NES_AEQE_AEID_AMP_MWBIND_BIND_DISABLED:
+               case NES_AEQE_AEID_BAD_CLOSE:
+               case NES_AEQE_AEID_RDMA_READ_WHILE_ORD_ZERO:
+               case NES_AEQE_AEID_STAG_ZERO_INVALID:
+               case NES_AEQE_AEID_ROE_INVALID_RDMA_READ_REQUEST:
+               case NES_AEQE_AEID_ROE_INVALID_RDMA_WRITE_OR_READ_RESP:
+                       nesqp = (struct nes_qp *)(unsigned long)context;
+                       nes_terminate_connection(nesdev, nesqp, aeqe, IB_EVENT_QP_FATAL);
                        break;
+
                case NES_AEQE_AEID_CQ_OPERATION_ERROR:
                        context <<= 1;
                        nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u, %p\n",
@@ -3153,83 +3525,19 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
                        if (resource_allocated) {
                                printk(KERN_ERR PFX "%s: Processing an NES_AEQE_AEID_CQ_OPERATION_ERROR event on CQ%u\n",
                                                __func__, le32_to_cpu(aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX]));
+                               hw_cq = (struct nes_hw_cq *)(unsigned long)context;
+                               if (hw_cq) {
+                                       nescq = container_of(hw_cq, struct nes_cq, hw_cq);
+                                       if (nescq->ibcq.event_handler) {
+                                               ibevent.device = nescq->ibcq.device;
+                                               ibevent.event = IB_EVENT_CQ_ERR;
+                                               ibevent.element.cq = &nescq->ibcq;
+                                               nescq->ibcq.event_handler(&ibevent, nescq->ibcq.cq_context);
+                                       }
+                               }
                        }
                        break;
-               case NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG_FOR_AVAILABLE_BUFFER:
-                       nesqp = nesadapter->qp_table[le32_to_cpu(
-                                       aeqe->aeqe_words[NES_AEQE_COMP_QP_CQ_ID_IDX])-NES_FIRST_QPN];
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_DDP_MESSAGE_TOO_LONG"
-                                       "_FOR_AVAILABLE_BUFFER event on QP%u\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_ACCESS_ERR;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
-                       /* tell cm to disconnect, cm will queue work to thread */
-                       nes_cm_disconn(nesqp);
-                       break;
-               case NES_AEQE_AEID_DDP_UBE_INVALID_MSN_NO_BUFFER_AVAILABLE:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_DDP_UBE_INVALID_MSN"
-                                       "_NO_BUFFER_AVAILABLE event on QP%u\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_FATAL;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
-                       /* tell cm to disconnect, cm will queue work to thread */
-                       nes_cm_disconn(nesqp);
-                       break;
-               case NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       nes_debug(NES_DBG_AEQ, "Processing an NES_AEQE_AEID_LLP_RECEIVED_MPA_CRC_ERROR"
-                                       " event on QP%u \n  Q2 Data:\n",
-                                       nesqp->hwqp.qp_id);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_FATAL;
-                               nesqp->ibqp.event_handler(&ibevent, nesqp->ibqp.qp_context);
-                       }
-                       /* tell cm to disconnect, cm will queue work to thread */
-                       nes_cm_disconn(nesqp);
-                       break;
-                       /* TODO: additional AEs need to be here */
-               case NES_AEQE_AEID_AMP_BOUNDS_VIOLATION:
-                       nesqp = *((struct nes_qp **)&context);
-                       spin_lock_irqsave(&nesqp->lock, flags);
-                       nesqp->hw_iwarp_state = iwarp_state;
-                       nesqp->hw_tcp_state = tcp_state;
-                       nesqp->last_aeq = async_event_id;
-                       spin_unlock_irqrestore(&nesqp->lock, flags);
-                       if (nesqp->ibqp.event_handler) {
-                               ibevent.device = nesqp->ibqp.device;
-                               ibevent.element.qp = &nesqp->ibqp;
-                               ibevent.event = IB_EVENT_QP_ACCESS_ERR;
-                               nesqp->ibqp.event_handler(&ibevent,
-                                               nesqp->ibqp.qp_context);
-                       }
-                       nes_cm_disconn(nesqp);
-                       break;
+
                default:
                        nes_debug(NES_DBG_AEQ, "Processing an iWARP related AE for QP, misc = 0x%04X\n",
                                        async_event_id);
@@ -3238,7 +3546,6 @@ static void nes_process_iwarp_aeqe(struct nes_device *nesdev,
 
 }
 
-
 /**
  * nes_iwarp_ce_handler
  */
@@ -3373,6 +3680,8 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
 {
        struct nes_cqp_request *cqp_request;
        struct nes_hw_cqp_wqe *cqp_wqe;
+       u32 sq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
+       u32 rq_code = (NES_IWARP_CQE_MAJOR_FLUSH << 16) | NES_IWARP_CQE_MINOR_FLUSH;
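+       /* Annotation: flush codes pack the major code in the upper 16 bits and
+        * the minor code in the lower 16, so the default is 0x00010001. */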
        int ret;
 
        cqp_request = nes_get_cqp_request(nesdev);
@@ -3389,6 +3698,24 @@ void flush_wqes(struct nes_device *nesdev, struct nes_qp *nesqp,
        cqp_wqe = &cqp_request->cqp_wqe;
        nes_fill_init_cqp_wqe(cqp_wqe, nesdev);
 
+       /* If the wqe in error was identified, set the code to be placed in the cqe */
+       if ((nesqp->term_sq_flush_code) && (which_wq & NES_CQP_FLUSH_SQ)) {
+               which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+               sq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_sq_flush_code;
+               nesqp->term_sq_flush_code = 0;
+       }
+
+       if ((nesqp->term_rq_flush_code) && (which_wq & NES_CQP_FLUSH_RQ)) {
+               which_wq |= NES_CQP_FLUSH_MAJ_MIN;
+               rq_code = (CQE_MAJOR_DRV << 16) | nesqp->term_rq_flush_code;
+               nesqp->term_rq_flush_code = 0;
+       }
+
+       if (which_wq & NES_CQP_FLUSH_MAJ_MIN) {
+               cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_SQ_CODE] = cpu_to_le32(sq_code);
+               cqp_wqe->wqe_words[NES_CQP_QP_WQE_FLUSH_RQ_CODE] = cpu_to_le32(rq_code);
+       }
+
        cqp_wqe->wqe_words[NES_CQP_WQE_OPCODE_IDX] =
                        cpu_to_le32(NES_CQP_FLUSH_WQES | which_wq);
        cqp_wqe->wqe_words[NES_CQP_WQE_ID_IDX] = cpu_to_le32(nesqp->hwqp.qp_id);
index c3654c6383fec427e501bcc3e1b04b574c700d43..f28a41ba9fa14378bf9f68c147e74976e0c1c3f4 100644 (file)
@@ -241,6 +241,7 @@ enum nes_cqp_stag_wqeword_idx {
 };
 
 #define NES_CQP_OP_IWARP_STATE_SHIFT 28
+#define NES_CQP_OP_TERMLEN_SHIFT     28
 
 enum nes_cqp_qp_bits {
        NES_CQP_QP_ARP_VALID = (1<<8),
@@ -265,12 +266,16 @@ enum nes_cqp_qp_bits {
        NES_CQP_QP_IWARP_STATE_TERMINATE = (5<<NES_CQP_OP_IWARP_STATE_SHIFT),
        NES_CQP_QP_IWARP_STATE_ERROR = (6<<NES_CQP_OP_IWARP_STATE_SHIFT),
        NES_CQP_QP_IWARP_STATE_MASK = (7<<NES_CQP_OP_IWARP_STATE_SHIFT),
+       NES_CQP_QP_TERM_DONT_SEND_FIN = (1<<24),
+       NES_CQP_QP_TERM_DONT_SEND_TERM_MSG = (1<<25),
        NES_CQP_QP_RESET = (1<<31),
 };
 
 enum nes_cqp_qp_wqe_word_idx {
        NES_CQP_QP_WQE_CONTEXT_LOW_IDX = 6,
        NES_CQP_QP_WQE_CONTEXT_HIGH_IDX = 7,
+       NES_CQP_QP_WQE_FLUSH_SQ_CODE = 8,
+       NES_CQP_QP_WQE_FLUSH_RQ_CODE = 9,
        NES_CQP_QP_WQE_NEW_MSS_IDX = 15,
 };
 
@@ -361,6 +366,7 @@ enum nes_cqp_arp_bits {
 enum nes_cqp_flush_bits {
        NES_CQP_FLUSH_SQ = (1<<30),
        NES_CQP_FLUSH_RQ = (1<<31),
+       NES_CQP_FLUSH_MAJ_MIN = (1<<28),
 };
 
 enum nes_cqe_opcode_bits {
@@ -633,11 +639,14 @@ enum nes_aeqe_bits {
        NES_AEQE_INBOUND_RDMA = (1<<19),
        NES_AEQE_IWARP_STATE_MASK = (7<<20),
        NES_AEQE_TCP_STATE_MASK = (0xf<<24),
+       NES_AEQE_Q2_DATA_WRITTEN = (0x3<<28),
        NES_AEQE_VALID = (1<<31),
 };
 
 #define NES_AEQE_IWARP_STATE_SHIFT     20
 #define NES_AEQE_TCP_STATE_SHIFT       24
+#define NES_AEQE_Q2_DATA_ETHERNET       (1<<28)
+#define NES_AEQE_Q2_DATA_MPA            (1<<29)
 
 enum nes_aeqe_iwarp_state {
        NES_AEQE_IWARP_STATE_NON_EXISTANT = 0,
@@ -751,6 +760,15 @@ enum nes_iwarp_sq_wqe_bits {
        NES_IWARP_SQ_OP_NOP = 12,
 };
 
+enum nes_iwarp_cqe_major_code {
+       NES_IWARP_CQE_MAJOR_FLUSH = 1,
+       NES_IWARP_CQE_MAJOR_DRV = 0x8000
+};
+
+enum nes_iwarp_cqe_minor_code {
+       NES_IWARP_CQE_MINOR_FLUSH = 1
+};
+
 #define NES_EEPROM_READ_REQUEST (1<<16)
 #define NES_MAC_ADDR_VALID      (1<<20)
 
@@ -1119,6 +1137,7 @@ struct nes_adapter {
        u8            netdev_max;       /* from host nic address count in EEPROM */
        u8            port_count;
        u8            virtwq;
+       u8            send_term_ok;
        u8            et_use_adaptive_rx_coalesce;
        u8            adapter_fcn_count;
        u8 pft_mcast_map[NES_PFT_SIZE];
@@ -1217,6 +1236,90 @@ struct nes_ib_device {
        u32 num_pd;
 };
 
+enum nes_hdrct_flags {
+       DDP_LEN_FLAG                    = 0x80,
+       DDP_HDR_FLAG                    = 0x40,
+       RDMA_HDR_FLAG                   = 0x20
+};
+
+enum nes_term_layers {
+       LAYER_RDMA                      = 0,
+       LAYER_DDP                       = 1,
+       LAYER_MPA                       = 2
+};
+
+enum nes_term_error_types {
+       RDMAP_CATASTROPHIC              = 0,
+       RDMAP_REMOTE_PROT               = 1,
+       RDMAP_REMOTE_OP                 = 2,
+       DDP_CATASTROPHIC                = 0,
+       DDP_TAGGED_BUFFER               = 1,
+       DDP_UNTAGGED_BUFFER             = 2,
+       DDP_LLP                         = 3
+};
+
+enum nes_term_rdma_errors {
+       RDMAP_INV_STAG                  = 0x00,
+       RDMAP_INV_BOUNDS                = 0x01,
+       RDMAP_ACCESS                    = 0x02,
+       RDMAP_UNASSOC_STAG              = 0x03,
+       RDMAP_TO_WRAP                   = 0x04,
+       RDMAP_INV_RDMAP_VER             = 0x05,
+       RDMAP_UNEXPECTED_OP             = 0x06,
+       RDMAP_CATASTROPHIC_LOCAL        = 0x07,
+       RDMAP_CATASTROPHIC_GLOBAL       = 0x08,
+       RDMAP_CANT_INV_STAG             = 0x09,
+       RDMAP_UNSPECIFIED               = 0xff
+};
+
+enum nes_term_ddp_errors {
+       DDP_CATASTROPHIC_LOCAL          = 0x00,
+       DDP_TAGGED_INV_STAG             = 0x00,
+       DDP_TAGGED_BOUNDS               = 0x01,
+       DDP_TAGGED_UNASSOC_STAG         = 0x02,
+       DDP_TAGGED_TO_WRAP              = 0x03,
+       DDP_TAGGED_INV_DDP_VER          = 0x04,
+       DDP_UNTAGGED_INV_QN             = 0x01,
+       DDP_UNTAGGED_INV_MSN_NO_BUF     = 0x02,
+       DDP_UNTAGGED_INV_MSN_RANGE      = 0x03,
+       DDP_UNTAGGED_INV_MO             = 0x04,
+       DDP_UNTAGGED_INV_TOO_LONG       = 0x05,
+       DDP_UNTAGGED_INV_DDP_VER        = 0x06
+};
+
+enum nes_term_mpa_errors {
+       MPA_CLOSED                      = 0x01,
+       MPA_CRC                         = 0x02,
+       MPA_MARKER                      = 0x03,
+       MPA_REQ_RSP                     = 0x04,
+};
+
+struct nes_terminate_hdr {
+       u8 layer_etype;
+       u8 error_code;
+       u8 hdrct;
+       u8 rsvd;
+};
+
+/* Used to determine how to fill in terminate error codes */
+#define IWARP_OPCODE_WRITE             0
+#define IWARP_OPCODE_READREQ           1
+#define IWARP_OPCODE_READRSP           2
+#define IWARP_OPCODE_SEND              3
+#define IWARP_OPCODE_SEND_INV          4
+#define IWARP_OPCODE_SEND_SE           5
+#define IWARP_OPCODE_SEND_SE_INV       6
+#define IWARP_OPCODE_TERM              7
+
+/* These values are used only during terminate processing */
+#define TERM_DDP_LEN_TAGGED    14
+#define TERM_DDP_LEN_UNTAGGED  18
+#define TERM_RDMA_LEN          28
+#define RDMA_OPCODE_MASK       0x0f
+#define RDMA_READ_REQ_OPCODE   1
+#define BAD_FRAME_OFFSET       64
+#define CQE_MAJOR_DRV          0x8000
+
 #define nes_vlan_rx vlan_hwaccel_receive_skb
 #define nes_netif_rx netif_receive_skb
 
index a282031d15c7088726c2d1834de8f5fd43775973..9687c397ce1ac7480e36692a022d358258ae3c51 100644 (file)
@@ -183,6 +183,9 @@ int nes_read_eeprom_values(struct nes_device *nesdev, struct nes_adapter *nesada
                } else if (((major_ver == 2) && (minor_ver > 21)) || ((major_ver > 2) && (major_ver != 255))) {
                        nesadapter->virtwq = 1;
                }
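+               /* firmware 3.16 and later can send iWARP terminate messages (annotation) */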
+               if (((major_ver == 3) && (minor_ver >= 16)) || (major_ver > 3))
+                       nesadapter->send_term_ok = 1;
+
                nesadapter->firmware_version = (((u32)(u8)(eeprom_data>>8))  <<  16) +
                                (u32)((u8)eeprom_data);
 
@@ -548,7 +551,7 @@ struct nes_cqp_request *nes_get_cqp_request(struct nes_device *nesdev)
                spin_unlock_irqrestore(&nesdev->cqp.lock, flags);
        }
        if (cqp_request == NULL) {
-               cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_KERNEL);
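+               /* use an atomic allocation since this can run in interrupt context (annotation) */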
+               cqp_request = kzalloc(sizeof(struct nes_cqp_request), GFP_ATOMIC);
                if (cqp_request) {
                        cqp_request->dynamic = 1;
                        INIT_LIST_HEAD(&cqp_request->list);
index 21e0fd336cf710196dcbf2fe5bb3daecac7a21f4..a680c42d6e8cb3b72a33c5a6c707d15cc294b202 100644 (file)
@@ -667,15 +667,32 @@ static int nes_query_device(struct ib_device *ibdev, struct ib_device_attr *prop
  */
 static int nes_query_port(struct ib_device *ibdev, u8 port, struct ib_port_attr *props)
 {
+       struct nes_vnic *nesvnic = to_nesvnic(ibdev);
+       struct net_device *netdev = nesvnic->netdev;
+
        memset(props, 0, sizeof(*props));
 
-       props->max_mtu = IB_MTU_2048;
-       props->active_mtu = IB_MTU_2048;
+       props->max_mtu = IB_MTU_4096;
+
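+       /* Annotation: report the largest IB MTU that fits the netdev MTU;
+        * e.g. a standard 1500-byte Ethernet MTU maps to IB_MTU_1024. */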
+       if (netdev->mtu >= 4096)
+               props->active_mtu = IB_MTU_4096;
+       else if (netdev->mtu >= 2048)
+               props->active_mtu = IB_MTU_2048;
+       else if (netdev->mtu >= 1024)
+               props->active_mtu = IB_MTU_1024;
+       else if (netdev->mtu >= 512)
+               props->active_mtu = IB_MTU_512;
+       else
+               props->active_mtu = IB_MTU_256;
+
        props->lid = 1;
        props->lmc = 0;
        props->sm_lid = 0;
        props->sm_sl = 0;
-       props->state = IB_PORT_ACTIVE;
+       if (nesvnic->linkup)
+               props->state = IB_PORT_ACTIVE;
+       else
+               props->state = IB_PORT_DOWN;
        props->phys_state = 0;
        props->port_cap_flags = IB_PORT_CM_SUP | IB_PORT_REINIT_SUP |
                        IB_PORT_VENDOR_CLASS_SUP | IB_PORT_BOOT_MGMT_SUP;
@@ -1505,13 +1522,46 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
 }
 
 
+/**
+ * nes_clean_cq
+ */
+static void nes_clean_cq(struct nes_qp *nesqp, struct nes_cq *nescq)
+{
+       u32 cq_head;
+       u32 lo;
+       u32 hi;
+       u64 u64temp;
+       unsigned long flags = 0;
+
+       spin_lock_irqsave(&nescq->lock, flags);
+
+       cq_head = nescq->hw_cq.cq_head;
+       while (le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_VALID) {
+               rmb();
+               lo = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+               hi = le32_to_cpu(nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX]);
+               u64temp = (((u64)hi) << 32) | ((u64)lo);
+               u64temp &= ~(NES_SW_CONTEXT_ALIGN-1);
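+               /* strip the low flag bits so the context compares equal to the QP pointer (annotation) */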
+               if (u64temp == (u64)(unsigned long)nesqp) {
+                       /* Zero the context value so cqe will be ignored */
+                       nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] = 0;
+                       nescq->hw_cq.cq_vbase[cq_head].cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX] = 0;
+               }
+
+               if (++cq_head >= nescq->hw_cq.cq_size)
+                       cq_head = 0;
+       }
+
+       spin_unlock_irqrestore(&nescq->lock, flags);
+}
+
+
 /**
  * nes_destroy_qp
  */
 static int nes_destroy_qp(struct ib_qp *ibqp)
 {
        struct nes_qp *nesqp = to_nesqp(ibqp);
-       /* struct nes_vnic *nesvnic = to_nesvnic(ibqp->device); */
        struct nes_ucontext *nes_ucontext;
        struct ib_qp_attr attr;
        struct iw_cm_id *cm_id;
@@ -1548,7 +1598,6 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
                        nes_debug(NES_DBG_QP, "OFA CM event_handler returned, ret=%d\n", ret);
        }
 
-
        if (nesqp->user_mode) {
                if ((ibqp->uobject)&&(ibqp->uobject->context)) {
                        nes_ucontext = to_nesucontext(ibqp->uobject->context);
@@ -1560,6 +1609,13 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
                }
                if (nesqp->pbl_pbase)
                        kunmap(nesqp->page);
+       } else {
+               /* Clean any pending completions from the cq(s) */
+               if (nesqp->nesscq)
+                       nes_clean_cq(nesqp, nesqp->nesscq);
+
+               if ((nesqp->nesrcq) && (nesqp->nesrcq != nesqp->nesscq))
+                       nes_clean_cq(nesqp, nesqp->nesrcq);
        }
 
        nes_rem_ref(&nesqp->ibqp);
@@ -2884,7 +2940,7 @@ static int nes_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
  * nes_hw_modify_qp
  */
 int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
-               u32 next_iwarp_state, u32 wait_completion)
+               u32 next_iwarp_state, u32 termlen, u32 wait_completion)
 {
        struct nes_hw_cqp_wqe *cqp_wqe;
        /* struct iw_cm_id *cm_id = nesqp->cm_id; */
@@ -2916,6 +2972,13 @@ int nes_hw_modify_qp(struct nes_device *nesdev, struct nes_qp *nesqp,
        set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_WQE_ID_IDX, nesqp->hwqp.qp_id);
        set_wqe_64bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_CONTEXT_LOW_IDX, (u64)nesqp->nesqp_context_pbase);
 
+       /* If sending a terminate message, fill in the length (in words) */
+       if (((next_iwarp_state & NES_CQP_QP_IWARP_STATE_MASK) == NES_CQP_QP_IWARP_STATE_TERMINATE) &&
+           !(next_iwarp_state & NES_CQP_QP_TERM_DONT_SEND_TERM_MSG)) {
+               termlen = ((termlen + 3) >> 2) << NES_CQP_OP_TERMLEN_SHIFT;
+               set_wqe_32bit_value(cqp_wqe->wqe_words, NES_CQP_QP_WQE_NEW_MSS_IDX, termlen);
+       }
+
        atomic_set(&cqp_request->refcount, 2);
        nes_post_cqp_request(nesdev, cqp_request);
 
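
The length written into the NEW_MSS field is expressed in 32-bit words: ((termlen + 3) >> 2) is ceil(termlen / 4), shifted into position. Checked in isolation (the shift value below is a placeholder, not the driver's NES_CQP_OP_TERMLEN_SHIFT):

#include <stdio.h>

#define TERMLEN_SHIFT 24	/* placeholder shift for illustration */

int main(void)
{
	unsigned int len;

	for (len = 0; len <= 5; len++)	/* 0,1,1,1,1,2 words */
		printf("termlen %u -> %u words -> 0x%08x\n", len,
		       (len + 3) >> 2, ((len + 3) >> 2) << TERMLEN_SHIFT);
	return 0;
}
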
@@ -3086,6 +3149,9 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                }
                                nes_debug(NES_DBG_MOD_QP, "QP%u: new state = error\n",
                                                nesqp->hwqp.qp_id);
+                               if (nesqp->term_flags)
+                                       del_timer(&nesqp->terminate_timer);
+
                                next_iwarp_state = NES_CQP_QP_IWARP_STATE_ERROR;
                                /* next_iwarp_state = (NES_CQP_QP_IWARP_STATE_TERMINATE | 0x02000000); */
                                        if (nesqp->hte_added) {
@@ -3163,7 +3229,7 @@ int nes_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
 
        if (issue_modify_qp) {
                nes_debug(NES_DBG_MOD_QP, "call nes_hw_modify_qp\n");
-               ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 1);
+               ret = nes_hw_modify_qp(nesdev, nesqp, next_iwarp_state, 0, 1);
                if (ret)
                        nes_debug(NES_DBG_MOD_QP, "nes_hw_modify_qp (next_iwarp_state = 0x%08X)"
                                        " failed for QP%u.\n",
@@ -3328,6 +3394,12 @@ static int nes_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
        head = nesqp->hwqp.sq_head;
 
        while (ib_wr) {
+               /* Check for QP error */
+               if (nesqp->term_flags) {
+                       err = -EINVAL;
+                       break;
+               }
+
                /* Check for SQ overflow */
                if (((head + (2 * qsize) - nesqp->hwqp.sq_tail) % qsize) == (qsize - 1)) {
                        err = -EINVAL;
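
Two separate rejections now guard the posting loop: once a terminate is pending (term_flags set) no further work requests are accepted, and the ring-fullness test refuses the last free slot so head can never catch tail from behind. The fullness test deserves a worked example; adding 2*qsize before the modulo keeps the unsigned subtraction from wrapping:

#include <stdio.h>

int main(void)
{
	unsigned int qsize = 8, tail = 5, head;

	/* Full exactly when head sits one slot behind tail (mod qsize). */
	for (head = 0; head < qsize; head++)
		printf("head=%u tail=%u -> %s\n", head, tail,
		       (((head + 2 * qsize - tail) % qsize) == qsize - 1)
		       ? "full" : "ok");
	return 0;
}
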
@@ -3484,6 +3556,12 @@ static int nes_post_recv(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
        head = nesqp->hwqp.rq_head;
 
        while (ib_wr) {
+               /* Check for QP error */
+               if (nesqp->term_flags) {
+                       err = -EINVAL;
+                       break;
+               }
+
                if (ib_wr->num_sge > nesdev->nesadapter->max_sge) {
                        err = -EINVAL;
                        break;
@@ -3547,7 +3625,6 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 {
        u64 u64temp;
        u64 wrid;
-       /* u64 u64temp; */
        unsigned long flags = 0;
        struct nes_vnic *nesvnic = to_nesvnic(ibcq->device);
        struct nes_device *nesdev = nesvnic->nesdev;
@@ -3555,12 +3632,13 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
        struct nes_qp *nesqp;
        struct nes_hw_cqe cqe;
        u32 head;
-       u32 wq_tail;
+       u32 wq_tail = 0;
        u32 cq_size;
        u32 cqe_count = 0;
        u32 wqe_index;
        u32 u32temp;
-       /* u32 counter; */
+       u32 move_cq_head = 1;
+       u32 err_code;
 
        nes_debug(NES_DBG_CQ, "\n");
 
@@ -3570,29 +3648,40 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
        cq_size = nescq->hw_cq.cq_size;
 
        while (cqe_count < num_entries) {
-               if (le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
-                               NES_CQE_VALID) {
-                       /*
-                        * Make sure we read CQ entry contents *after*
-                        * we've checked the valid bit.
-                        */
-                       rmb();
-
-                       cqe = nescq->hw_cq.cq_vbase[head];
-                       nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
-                       u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
-                       wqe_index = u32temp &
-                                       (nesdev->nesadapter->max_qp_wr - 1);
-                       u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
-                       /* parse CQE, get completion context from WQE (either rq or sq */
-                       u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
-                                       ((u64)u32temp);
-                       nesqp = *((struct nes_qp **)&u64temp);
+               if ((le32_to_cpu(nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX]) &
+                               NES_CQE_VALID) == 0)
+                       break;
+
+               /*
+                * Make sure we read CQ entry contents *after*
+                * we've checked the valid bit.
+                */
+               rmb();
+
+               cqe = nescq->hw_cq.cq_vbase[head];
+               u32temp = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+               wqe_index = u32temp & (nesdev->nesadapter->max_qp_wr - 1);
+               u32temp &= ~(NES_SW_CONTEXT_ALIGN-1);
+               /* parse CQE, get completion context from WQE (either rq or sq) */
+               u64temp = (((u64)(le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_HIGH_IDX])))<<32) |
+                               ((u64)u32temp);
+
+               if (u64temp) {
+                       nesqp = (struct nes_qp *)(unsigned long)u64temp;
                        memset(entry, 0, sizeof *entry);
                        if (cqe.cqe_words[NES_CQE_ERROR_CODE_IDX] == 0) {
                                entry->status = IB_WC_SUCCESS;
                        } else {
-                               entry->status = IB_WC_WR_FLUSH_ERR;
+                               err_code = le32_to_cpu(cqe.cqe_words[NES_CQE_ERROR_CODE_IDX]);
+                               if (NES_IWARP_CQE_MAJOR_DRV == (err_code >> 16)) {
+                                       entry->status = err_code & 0x0000ffff;
+
+                                       /* The rest of the cqe's will be marked as flushed */
+                                       nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_ERROR_CODE_IDX] =
+                                               cpu_to_le32((NES_IWARP_CQE_MAJOR_FLUSH << 16) |
+                                                           NES_IWARP_CQE_MINOR_FLUSH);
+                               } else
+                                       entry->status = IB_WC_WR_FLUSH_ERR;
                        }
 
                        entry->qp = &nesqp->ibqp;
@@ -3601,20 +3690,18 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
                        if (le32_to_cpu(cqe.cqe_words[NES_CQE_OPCODE_IDX]) & NES_CQE_SQ) {
                                if (nesqp->skip_lsmm) {
                                        nesqp->skip_lsmm = 0;
-                                       wq_tail = nesqp->hwqp.sq_tail++;
+                                       nesqp->hwqp.sq_tail++;
                                }
 
                                /* Working on a SQ Completion*/
-                               wq_tail = wqe_index;
-                               nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
-                               wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+                               wrid = (((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
                                                wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_HIGH_IDX]))) << 32) |
-                                               ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wq_tail].
+                                               ((u64)(cpu_to_le32((u32)nesqp->hwqp.sq_vbase[wqe_index].
                                                wqe_words[NES_IWARP_SQ_WQE_COMP_SCRATCH_LOW_IDX])));
-                               entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+                               entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
                                                wqe_words[NES_IWARP_SQ_WQE_TOTAL_PAYLOAD_IDX]);
 
-                               switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+                               switch (le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
                                                wqe_words[NES_IWARP_SQ_WQE_MISC_IDX]) & 0x3f) {
                                        case NES_IWARP_SQ_OP_RDMAW:
                                                nes_debug(NES_DBG_CQ, "Operation = RDMA WRITE.\n");
@@ -3623,7 +3710,7 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
                                        case NES_IWARP_SQ_OP_RDMAR:
                                                nes_debug(NES_DBG_CQ, "Operation = RDMA READ.\n");
                                                entry->opcode = IB_WC_RDMA_READ;
-                                               entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wq_tail].
+                                               entry->byte_len = le32_to_cpu(nesqp->hwqp.sq_vbase[wqe_index].
                                                                wqe_words[NES_IWARP_SQ_WQE_RDMA_LENGTH_IDX]);
                                                break;
                                        case NES_IWARP_SQ_OP_SENDINV:
@@ -3634,33 +3721,54 @@ static int nes_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
                                                entry->opcode = IB_WC_SEND;
                                                break;
                                }
+
+                               nesqp->hwqp.sq_tail = (wqe_index+1)&(nesqp->hwqp.sq_size - 1);
+                               if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.sq_tail != nesqp->hwqp.sq_head)) {
+                                       move_cq_head = 0;
+                                       wq_tail = nesqp->hwqp.sq_tail;
+                               }
                        } else {
                                /* Working on a RQ Completion*/
-                               wq_tail = wqe_index;
-                                       nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
                                entry->byte_len = le32_to_cpu(cqe.cqe_words[NES_CQE_PAYLOAD_LENGTH_IDX]);
-                               wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
-                                       ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wq_tail].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
+                               wrid = ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_LOW_IDX]))) |
+                                       ((u64)(le32_to_cpu(nesqp->hwqp.rq_vbase[wqe_index].wqe_words[NES_IWARP_RQ_WQE_COMP_SCRATCH_HIGH_IDX]))<<32);
                                        entry->opcode = IB_WC_RECV;
+
+                               nesqp->hwqp.rq_tail = (wqe_index+1)&(nesqp->hwqp.rq_size - 1);
+                               if ((entry->status != IB_WC_SUCCESS) && (nesqp->hwqp.rq_tail != nesqp->hwqp.rq_head)) {
+                                       move_cq_head = 0;
+                                       wq_tail = nesqp->hwqp.rq_tail;
+                               }
                        }
+
                        entry->wr_id = wrid;
+                       entry++;
+                       cqe_count++;
+               }
 
+               if (move_cq_head) {
+                       nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_OPCODE_IDX] = 0;
                        if (++head >= cq_size)
                                head = 0;
-                       cqe_count++;
                        nescq->polled_completions++;
+
                        if ((nescq->polled_completions > (cq_size / 2)) ||
                                        (nescq->polled_completions == 255)) {
                                nes_debug(NES_DBG_CQ, "CQ%u Issuing CQE Allocate since more than half of cqes"
-                                               " are pending %u of %u.\n",
-                                               nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
+                                       " are pending %u of %u.\n",
+                                       nescq->hw_cq.cq_number, nescq->polled_completions, cq_size);
                                nes_write32(nesdev->regs+NES_CQE_ALLOC,
-                                               nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
+                                       nescq->hw_cq.cq_number | (nescq->polled_completions << 16));
                                nescq->polled_completions = 0;
                        }
-                       entry++;
-               } else
-                       break;
+               } else {
+                       /* Update the wqe index and set status to flush */
+                       wqe_index = le32_to_cpu(cqe.cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX]);
+                       wqe_index = (wqe_index & (~(nesdev->nesadapter->max_qp_wr - 1))) | wq_tail;
+                       nescq->hw_cq.cq_vbase[head].cqe_words[NES_CQE_COMP_COMP_CTX_LOW_IDX] =
+                               cpu_to_le32(wqe_index);
+                       move_cq_head = 1; /* ready for next pass */
+               }
        }
 
        if (nescq->polled_completions) {
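
The restructured loop has three outcomes per CQE: break immediately on the first invalid entry; consume it and advance (move_cq_head); or, when an errored completion still has unflushed work queue entries behind it, rewrite the CQE's embedded WQE index and leave the head in place so the same slot is reprocessed as a flush on the next pass. The load ordering is the part worth copying elsewhere; a schematic of the loop shape, with the CQE layout and consume helper as stand-ins rather than the driver's own:

#include <linux/types.h>

#define CQE_VALID 0x1	/* stand-in for NES_CQE_VALID */

struct cqe { __le32 flags; /* payload words elided */ };

static int poll_ring(struct cqe *cq, u32 cq_size, u32 *headp, int budget)
{
	u32 head = *headp;
	int polled = 0;

	while (polled < budget) {
		if (!(le32_to_cpu(cq[head].flags) & CQE_VALID))
			break;		/* ring drained: stop at once */
		rmb();			/* payload reads only after valid bit */

		/* hypothetical helper: returns 0 to retry this slot */
		if (consume_cqe(&cq[head], &polled)) {
			cq[head].flags = 0;	/* give slot back to HW */
			if (++head >= cq_size)
				head = 0;
		}
	}
	*headp = head;
	return polled;
}
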
index 41c07f29f7c9ce3f7105308801617739375c90c4..89822d75f82ec581375bd87c9336f600eed6c7d1 100644 (file)
@@ -40,6 +40,10 @@ struct nes_device;
 #define NES_MAX_USER_DB_REGIONS  4096
 #define NES_MAX_USER_WQ_REGIONS  4096
 
+#define NES_TERM_SENT            0x01
+#define NES_TERM_RCVD            0x02
+#define NES_TERM_DONE            0x04
+
 struct nes_ucontext {
        struct ib_ucontext ibucontext;
        struct nes_device  *nesdev;
@@ -119,6 +123,11 @@ struct nes_wq {
        spinlock_t lock;
 };
 
+struct disconn_work {
+       struct work_struct    work;
+       struct nes_qp         *nesqp;
+};
+
 struct iw_cm_id;
 struct ietf_mpa_frame;
 
@@ -127,7 +136,6 @@ struct nes_qp {
        void                  *allocated_buffer;
        struct iw_cm_id       *cm_id;
        struct workqueue_struct *wq;
-       struct work_struct    disconn_work;
        struct nes_cq         *nesscq;
        struct nes_cq         *nesrcq;
        struct nes_pd         *nespd;
@@ -155,9 +163,13 @@ struct nes_qp {
        void                  *pbl_vbase;
        dma_addr_t            pbl_pbase;
        struct page           *page;
+       struct timer_list     terminate_timer;
+       enum ib_event_type    terminate_eventtype;
        wait_queue_head_t     kick_waitq;
        u16                   in_disconnect;
        u16                   private_data_len;
+       u16                   term_sq_flush_code;
+       u16                   term_rq_flush_code;
        u8                    active_conn;
        u8                    skip_lsmm;
        u8                    user_mode;
@@ -165,7 +177,7 @@ struct nes_qp {
        u8                    hw_iwarp_state;
        u8                    flush_issued;
        u8                    hw_tcp_state;
-       u8                    disconn_pending;
+       u8                    term_flags;
        u8                    destroyed;
 };
 #endif                 /* NES_VERBS_H */
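
The new terminate_timer puts a deadline on the terminate exchange tracked by term_flags (NES_TERM_SENT/RCVD/DONE above); nes_modify_qp deletes it when the QP is forced to error through another path. With the timer API of this kernel generation, arming it would look roughly like the following; the handler name, the helper it calls, and the one-second timeout are assumptions:

#include <linux/timer.h>

#define NES_TERM_TIMEOUT (HZ)	/* assumed: ~1s for the peer to respond */

static void nes_terminate_timeout(unsigned long context)
{
	struct nes_qp *nesqp = (struct nes_qp *)context;

	nes_terminate_done(nesqp);	/* hypothetical: stop waiting, go to error */
}

static void nes_arm_terminate_timer(struct nes_qp *nesqp)
{
	/* setup_timer() would normally run once, at QP creation */
	setup_timer(&nesqp->terminate_timer, nes_terminate_timeout,
		    (unsigned long)nesqp);
	mod_timer(&nesqp->terminate_timer, jiffies + NES_TERM_TIMEOUT);
}
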
index 181b1f32325f3a009ce13af55c7aafa66dd67177..8f4b4fca2a1d9dbd26964771f6baa99614884455 100644 (file)
@@ -31,7 +31,6 @@
  */
 
 #include <rdma/ib_cm.h>
-#include <rdma/ib_cache.h>
 #include <net/dst.h>
 #include <net/icmp.h>
 #include <linux/icmpv6.h>
index e7e5adf84e840e3f13aa54dc1c1036f6d4a81846..e35f4a0ea9d5ffd457fef7becca2ed36685fb73b 100644 (file)
@@ -36,7 +36,6 @@
 #include <linux/delay.h>
 #include <linux/dma-mapping.h>
 
-#include <rdma/ib_cache.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
 
index e319d91f60a609be13d3e18c94df9477207178c6..2bf5116deec41d7109b1b6b3b6ccc141ae3ac052 100644 (file)
@@ -604,8 +604,11 @@ static void neigh_add_path(struct sk_buff *skb, struct net_device *dev)
                                           skb_queue_len(&neigh->queue));
                                goto err_drop;
                        }
-               } else
+               } else {
+                       spin_unlock_irqrestore(&priv->lock, flags);
                        ipoib_send(dev, skb, path->ah, IPOIB_QPN(skb_dst(skb)->neighbour->ha));
+                       return;
+               }
        } else {
                neigh->ah  = NULL;
 
@@ -688,7 +691,9 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev,
                ipoib_dbg(priv, "Send unicast ARP to %04x\n",
                          be16_to_cpu(path->pathrec.dlid));
 
+               spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, path->ah, IPOIB_QPN(phdr->hwaddr));
+               return;
        } else if ((path->query || !path_rec_start(dev, path)) &&
                   skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) {
                /* put pseudoheader back on for next time */
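
Both hunks apply the same fix: take what the transmit needs from the path entry while priv->lock is held, release the lock, then call ipoib_send() and return, instead of posting the send with the lock still held. The control flow, schematically (the lookup and field names are abbreviations, not the exact ipoib code):

	spin_lock_irqsave(&priv->lock, flags);
	path = __path_find(dev, daddr);		/* schematic lookup */
	if (path && path->ah) {
		struct ipoib_ah *ah = path->ah;	/* all the send path needs */

		spin_unlock_irqrestore(&priv->lock, flags);
		ipoib_send(dev, skb, ah, qpn);	/* runs without priv->lock */
		return;
	}
	/* otherwise queue the skb / start a path query, still under the lock */
	spin_unlock_irqrestore(&priv->lock, flags);
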
index a0e97532e7140f422fbd1e40b1a57626f8e2396b..25874fc680c99d74a0402b29000d6569e0a7a9b7 100644 (file)
@@ -720,7 +720,9 @@ out:
                        }
                }
 
+               spin_unlock_irqrestore(&priv->lock, flags);
                ipoib_send(dev, skb, mcast->ah, IB_MULTICAST_QPN);
+               return;
        }
 
 unlock:
@@ -758,6 +760,20 @@ void ipoib_mcast_dev_flush(struct net_device *dev)
        }
 }
 
+static int ipoib_mcast_addr_is_valid(const u8 *addr, unsigned int addrlen,
+                                    const u8 *broadcast)
+{
+       if (addrlen != INFINIBAND_ALEN)
+               return 0;
+       /* reserved QPN, prefix, scope */
+       if (memcmp(addr, broadcast, 6))
+               return 0;
+       /* signature lower, pkey */
+       if (memcmp(addr + 7, broadcast + 7, 3))
+               return 0;
+       return 1;
+}
+
 void ipoib_mcast_restart_task(struct work_struct *work)
 {
        struct ipoib_dev_priv *priv =
@@ -791,6 +807,11 @@ void ipoib_mcast_restart_task(struct work_struct *work)
        for (mclist = dev->mc_list; mclist; mclist = mclist->next) {
                union ib_gid mgid;
 
+               if (!ipoib_mcast_addr_is_valid(mclist->dmi_addr,
+                                              mclist->dmi_addrlen,
+                                              dev->broadcast))
+                       continue;
+
                memcpy(mgid.raw, mclist->dmi_addr + 4, sizeof mgid);
 
                mcast = __ipoib_mcast_find(dev, &mgid);
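
ipoib_mcast_restart_task now filters dev->mc_list entries through the helper above before mapping them to MGIDs. The helper compares bytes 0-5 (reserved QPN, GID prefix, scope) and bytes 7-9 (low signature byte, P_Key) against the broadcast address and deliberately skips byte 6, which is where the IPv4 (0x401B) and IPv6 (0x601B) multicast signatures of the IPoIB address mapping (RFC 4391) differ. A stand-alone check with illustrative byte values:

#include <stdio.h>
#include <string.h>

#define INFINIBAND_ALEN 20

static int mcast_addr_is_valid(const unsigned char *addr,
			       unsigned int addrlen,
			       const unsigned char *broadcast)
{
	if (addrlen != INFINIBAND_ALEN)
		return 0;
	if (memcmp(addr, broadcast, 6))		/* QPN, prefix, scope */
		return 0;
	if (memcmp(addr + 7, broadcast + 7, 3))	/* sig low, pkey */
		return 0;
	return 1;
}

int main(void)
{
	unsigned char bcast[INFINIBAND_ALEN] = {
		0x00, 0xff, 0xff, 0xff,		/* broadcast QPN */
		0xff, 0x12, 0x40, 0x1b,		/* GID prefix/scope/signature */
	};
	unsigned char a[INFINIBAND_ALEN];

	memcpy(a, bcast, sizeof(a));
	a[6] = 0x60;	/* IPv6-mapped group: differs only in byte 6 */
	printf("ipv6 group: %d\n", mcast_addr_is_valid(a, 20, bcast)); /* 1 */
	a[5] = 0x13;	/* wrong scope byte: rejected */
	printf("bad scope:  %d\n", mcast_addr_is_valid(a, 20, bcast)); /* 0 */
	return 0;
}
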
index fb5df5c6203e9541849fce56a52abfaedfbe7391..c97ab82ec743b5edd1371dcdf3d3e64afc198dfd 100644 (file)
@@ -1286,6 +1286,7 @@ static int cxgb_open(struct net_device *dev)
        if (!other_ports)
                schedule_chk_task(adapter);
 
+       cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_UP, pi->port_id);
        return 0;
 }
 
@@ -1318,6 +1319,7 @@ static int cxgb_close(struct net_device *dev)
        if (!adapter->open_device_map)
                cxgb_down(adapter);
 
+       cxgb3_event_notify(&adapter->tdev, OFFLOAD_PORT_DOWN, pi->port_id);
        return 0;
 }
 
@@ -2717,7 +2719,7 @@ static int t3_adapter_error(struct adapter *adapter, int reset)
 
        if (is_offload(adapter) &&
            test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) {
-               cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
+               cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_DOWN, 0);
                offload_close(&adapter->tdev);
        }
 
@@ -2782,7 +2784,7 @@ static void t3_resume_ports(struct adapter *adapter)
        }
 
        if (is_offload(adapter) && !ofld_disable)
-               cxgb3_err_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
+               cxgb3_event_notify(&adapter->tdev, OFFLOAD_STATUS_UP, 0);
 }
 
 /*
index f9f54b57b28ca9af177f00b52923f607f2979f64..75064eea1d87eea36acb36b5351f7f5a16855adb 100644 (file)
@@ -153,14 +153,14 @@ void cxgb3_remove_clients(struct t3cdev *tdev)
        mutex_unlock(&cxgb3_db_lock);
 }
 
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error)
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
 {
        struct cxgb3_client *client;
 
        mutex_lock(&cxgb3_db_lock);
        list_for_each_entry(client, &client_list, client_list) {
-               if (client->err_handler)
-                       client->err_handler(tdev, status, error);
+               if (client->event_handler)
+                       client->event_handler(tdev, event, port);
        }
        mutex_unlock(&cxgb3_db_lock);
 }
index 55945f422aec0f70e2f608d1dd55cc23375003ae..670aa62042daa8699b5aa0f4e2dfb6f3822003f1 100644 (file)
@@ -64,14 +64,16 @@ void cxgb3_register_client(struct cxgb3_client *client);
 void cxgb3_unregister_client(struct cxgb3_client *client);
 void cxgb3_add_clients(struct t3cdev *tdev);
 void cxgb3_remove_clients(struct t3cdev *tdev);
-void cxgb3_err_notify(struct t3cdev *tdev, u32 status, u32 error);
+void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port);
 
 typedef int (*cxgb3_cpl_handler_func)(struct t3cdev *dev,
                                      struct sk_buff *skb, void *ctx);
 
 enum {
        OFFLOAD_STATUS_UP,
-       OFFLOAD_STATUS_DOWN
+       OFFLOAD_STATUS_DOWN,
+       OFFLOAD_PORT_DOWN,
+       OFFLOAD_PORT_UP
 };
 
 struct cxgb3_client {
@@ -82,7 +84,7 @@ struct cxgb3_client {
        int (*redirect)(void *ctx, struct dst_entry *old,
                        struct dst_entry *new, struct l2t_entry *l2t);
        struct list_head client_list;
-       void (*err_handler)(struct t3cdev *tdev, u32 status, u32 error);
+       void (*event_handler)(struct t3cdev *tdev, u32 event, u32 port);
 };
 
 /*
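
With err_handler widened into event_handler, a client now receives per-port link transitions in addition to the adapter-wide status events. A hedged sketch of how a ULP would consume the new callback; only the enum values and the callback signature come from this header, everything else is hypothetical:

static void my_event_handler(struct t3cdev *tdev, u32 event, u32 port)
{
	switch (event) {
	case OFFLOAD_STATUS_UP:
	case OFFLOAD_STATUS_DOWN:
		/* adapter-wide state change, as before; 'port' unused */
		break;
	case OFFLOAD_PORT_UP:
	case OFFLOAD_PORT_DOWN:
		/* new: per-port link transition, 'port' is the port id */
		break;
	}
}

static struct cxgb3_client my_client = {
	.name          = "my_ulp",		/* hypothetical client */
	.add           = my_add,		/* hypothetical */
	.remove        = my_remove,		/* hypothetical */
	.event_handler = my_event_handler,
};
/* registered with cxgb3_register_client(&my_client); */
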
index ac57b6a42c6ee8791fcca7f385a2c703b4e96799..ccfe276943f09f7e036899d90a09ea6220ab77cf 100644 (file)
@@ -34,7 +34,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/hardirq.h>
 
 #include <linux/mlx4/cmd.h>
index b9ceddde46c0adf5f7e9602235bef02cd534babf..bffb7995cb70a4f6a9b09bdcc137cd317e3a641e 100644 (file)
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/mm.h>
 #include <linux/dma-mapping.h>
 #include "mlx4.h"
 #include "fw.h"
 
+enum {
+       MLX4_IRQNAME_SIZE       = 64
+};
+
 enum {
        MLX4_NUM_ASYNC_EQE      = 0x100,
        MLX4_NUM_SPARE_EQE      = 0x80,
@@ -526,48 +529,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-       int ret;
-
-       /*
-        * We assume that mapping one page is enough for the whole EQ
-        * context table.  This is fine with all current HCAs, because
-        * we only use 32 EQs and each EQ uses 64 bytes of context
-        * memory, or 1 KB total.
-        */
-       priv->eq_table.icm_virt = icm_virt;
-       priv->eq_table.icm_page = alloc_page(GFP_HIGHUSER);
-       if (!priv->eq_table.icm_page)
-               return -ENOMEM;
-       priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
-                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
-               __free_page(priv->eq_table.icm_page);
-               return -ENOMEM;
-       }
-
-       ret = mlx4_MAP_ICM_page(dev, priv->eq_table.icm_dma, icm_virt);
-       if (ret) {
-               pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-                              PCI_DMA_BIDIRECTIONAL);
-               __free_page(priv->eq_table.icm_page);
-       }
-
-       return ret;
-}
-
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev)
-{
-       struct mlx4_priv *priv = mlx4_priv(dev);
-
-       mlx4_UNMAP_ICM(dev, priv->eq_table.icm_virt, 1);
-       pci_unmap_page(dev->pdev, priv->eq_table.icm_dma, PAGE_SIZE,
-                      PCI_DMA_BIDIRECTIONAL);
-       __free_page(priv->eq_table.icm_page);
-}
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -615,7 +576,9 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
        priv->eq_table.clr_int  = priv->clr_base +
                (priv->eq_table.inta_pin < 32 ? 4 : 0);
 
-       priv->eq_table.irq_names = kmalloc(16 * dev->caps.num_comp_vectors, GFP_KERNEL);
+       priv->eq_table.irq_names =
+               kmalloc(MLX4_IRQNAME_SIZE * (dev->caps.num_comp_vectors + 1),
+                       GFP_KERNEL);
        if (!priv->eq_table.irq_names) {
                err = -ENOMEM;
                goto err_out_bitmap;
@@ -638,17 +601,25 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                goto err_out_comp;
 
        if (dev->flags & MLX4_FLAG_MSI_X) {
-               static const char async_eq_name[] = "mlx4-async";
                const char *eq_name;
 
                for (i = 0; i < dev->caps.num_comp_vectors + 1; ++i) {
                        if (i < dev->caps.num_comp_vectors) {
-                               snprintf(priv->eq_table.irq_names + i * 16, 16,
-                                        "mlx4-comp-%d", i);
-                               eq_name = priv->eq_table.irq_names + i * 16;
-                       } else
-                               eq_name = async_eq_name;
+                               snprintf(priv->eq_table.irq_names +
+                                        i * MLX4_IRQNAME_SIZE,
+                                        MLX4_IRQNAME_SIZE,
+                                        "mlx4-comp-%d@pci:%s", i,
+                                        pci_name(dev->pdev));
+                       } else {
+                               snprintf(priv->eq_table.irq_names +
+                                        i * MLX4_IRQNAME_SIZE,
+                                        MLX4_IRQNAME_SIZE,
+                                        "mlx4-async@pci:%s",
+                                        pci_name(dev->pdev));
+                       }
 
+                       eq_name = priv->eq_table.irq_names +
+                                 i * MLX4_IRQNAME_SIZE;
                        err = request_irq(priv->eq_table.eq[i].irq,
                                          mlx4_msi_x_interrupt, 0, eq_name,
                                          priv->eq_table.eq + i);
@@ -658,8 +629,12 @@ int mlx4_init_eq_table(struct mlx4_dev *dev)
                        priv->eq_table.eq[i].have_irq = 1;
                }
        } else {
+               snprintf(priv->eq_table.irq_names,
+                        MLX4_IRQNAME_SIZE,
+                        DRV_NAME "@pci:%s",
+                        pci_name(dev->pdev));
                err = request_irq(dev->pdev->irq, mlx4_interrupt,
-                                 IRQF_SHARED, DRV_NAME, dev);
+                                 IRQF_SHARED, priv->eq_table.irq_names, dev);
                if (err)
                        goto err_out_async;
 
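
request_irq() stores only the pointer to the name it is given, so these strings must outlive the IRQ, and the old 16-byte slots could not hold names that embed pci_name(). The patch sizes one MLX4_IRQNAME_SIZE slot per completion vector plus one for the async EQ, which keeps multiple adapters distinguishable in /proc/interrupts. The layout, condensed (nvec and pdev are assumed from the surrounding code):

#include <linux/pci.h>
#include <linux/slab.h>

#define MLX4_IRQNAME_SIZE 64

static char *alloc_irq_names(struct pci_dev *pdev, int nvec)
{
	char *names = kmalloc(MLX4_IRQNAME_SIZE * (nvec + 1), GFP_KERNEL);
	int i;

	if (!names)
		return NULL;
	for (i = 0; i <= nvec; ++i) {
		char *name = names + i * MLX4_IRQNAME_SIZE;

		if (i < nvec)
			snprintf(name, MLX4_IRQNAME_SIZE,
				 "mlx4-comp-%d@pci:%s", i, pci_name(pdev));
		else
			snprintf(name, MLX4_IRQNAME_SIZE,
				 "mlx4-async@pci:%s", pci_name(pdev));
		/* each 'name' is later handed to request_irq() and must
		 * stay allocated until the matching free_irq() */
	}
	return names;
}
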
index baf4bf66062ca6a91652eb6bd5e5224b3ffb9440..04b382fcb8c881c2ab76fea05636deb63c64effd 100644 (file)
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/scatterlist.h>
index dac621b1e9fc9a287fb8b4f7fdfa4f538ecc2a77..3dd481e77f92f1f5cff4af9cfd9d9975439520af 100644 (file)
@@ -525,7 +525,10 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
                goto err_unmap_aux;
        }
 
-       err = mlx4_map_eq_icm(dev, init_hca->eqc_base);
+       err = mlx4_init_icm_table(dev, &priv->eq_table.table,
+                                 init_hca->eqc_base, dev_cap->eqc_entry_sz,
+                                 dev->caps.num_eqs, dev->caps.num_eqs,
+                                 0, 0);
        if (err) {
                mlx4_err(dev, "Failed to map EQ context memory, aborting.\n");
                goto err_unmap_cmpt;
@@ -668,7 +671,7 @@ err_unmap_mtt:
        mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
 
 err_unmap_eq:
-       mlx4_unmap_eq_icm(dev);
+       mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
 
 err_unmap_cmpt:
        mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
@@ -698,11 +701,11 @@ static void mlx4_free_icms(struct mlx4_dev *dev)
        mlx4_cleanup_icm_table(dev, &priv->qp_table.qp_table);
        mlx4_cleanup_icm_table(dev, &priv->mr_table.dmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->mr_table.mtt_table);
+       mlx4_cleanup_icm_table(dev, &priv->eq_table.table);
        mlx4_cleanup_icm_table(dev, &priv->eq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->cq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->srq_table.cmpt_table);
        mlx4_cleanup_icm_table(dev, &priv->qp_table.cmpt_table);
-       mlx4_unmap_eq_icm(dev);
 
        mlx4_UNMAP_ICM_AUX(dev);
        mlx4_free_icm(dev, priv->fw.aux_icm, 0);
@@ -786,7 +789,7 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
        return 0;
 
 err_close:
-       mlx4_close_hca(dev);
+       mlx4_CLOSE_HCA(dev, 0);
 
 err_free_icm:
        mlx4_free_icms(dev);
@@ -1070,18 +1073,12 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                goto err_disable_pdev;
        }
 
-       err = pci_request_region(pdev, 0, DRV_NAME);
+       err = pci_request_regions(pdev, DRV_NAME);
        if (err) {
-               dev_err(&pdev->dev, "Cannot request control region, aborting.\n");
+               dev_err(&pdev->dev, "Couldn't get PCI resources, aborting\n");
                goto err_disable_pdev;
        }
 
-       err = pci_request_region(pdev, 2, DRV_NAME);
-       if (err) {
-               dev_err(&pdev->dev, "Cannot request UAR region, aborting.\n");
-               goto err_release_bar0;
-       }
-
        pci_set_master(pdev);
 
        err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1090,7 +1087,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
                if (err) {
                        dev_err(&pdev->dev, "Can't set PCI DMA mask, aborting.\n");
-                       goto err_release_bar2;
+                       goto err_release_regions;
                }
        }
        err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
@@ -1101,7 +1098,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                if (err) {
                        dev_err(&pdev->dev, "Can't set consistent PCI DMA mask, "
                                "aborting.\n");
-                       goto err_release_bar2;
+                       goto err_release_regions;
                }
        }
 
@@ -1110,7 +1107,7 @@ static int __mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
                dev_err(&pdev->dev, "Device struct alloc failed, "
                        "aborting.\n");
                err = -ENOMEM;
-               goto err_release_bar2;
+               goto err_release_regions;
        }
 
        dev       = &priv->dev;
@@ -1205,11 +1202,8 @@ err_cmd:
 err_free_dev:
        kfree(priv);
 
-err_release_bar2:
-       pci_release_region(pdev, 2);
-
-err_release_bar0:
-       pci_release_region(pdev, 0);
+err_release_regions:
+       pci_release_regions(pdev);
 
 err_disable_pdev:
        pci_disable_device(pdev);
@@ -1265,8 +1259,7 @@ static void mlx4_remove_one(struct pci_dev *pdev)
                        pci_disable_msix(pdev);
 
                kfree(priv);
-               pci_release_region(pdev, 2);
-               pci_release_region(pdev, 0);
+               pci_release_regions(pdev);
                pci_disable_device(pdev);
                pci_set_drvdata(pdev, NULL);
        }
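
Requesting every BAR with one pci_request_regions() call lets the probe error path collapse the two bar-specific unwind labels into a single err_release_regions, at the cost of also claiming regions the driver never maps, which is harmless. A condensed sketch of the resulting unwind (probe body elided, failure stands in for any later error):

static int probe_sketch(struct pci_dev *pdev)
{
	int err = pci_enable_device(pdev);

	if (err)
		return err;
	err = pci_request_regions(pdev, "mlx4_core");	/* all BARs at once */
	if (err)
		goto err_disable_pdev;

	err = -ENODEV;		/* stand-in for a later bring-up failure */
	goto err_release_regions;

err_release_regions:
	pci_release_regions(pdev);	/* one call undoes them all */
err_disable_pdev:
	pci_disable_device(pdev);
	return err;
}
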
index 6053c357a470c27bdc1cf827d24388269f413774..5ccbce9866fe0b70918f362fba4b9a6e4bef8da3 100644 (file)
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/string.h>
 #include <linux/slab.h>
 
index 5bd79c2b184fc0f5875d0a74f1956db721344e51..bc72d6e4919b8542b0721b797edd67560e31fb86 100644 (file)
@@ -205,9 +205,7 @@ struct mlx4_eq_table {
        void __iomem          **uar_map;
        u32                     clr_mask;
        struct mlx4_eq         *eq;
-       u64                     icm_virt;
-       struct page            *icm_page;
-       dma_addr_t              icm_dma;
+       struct mlx4_icm_table   table;
        struct mlx4_icm_table   cmpt_table;
        int                     have_irq;
        u8                      inta_pin;
@@ -373,9 +371,6 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
                      struct mlx4_dev_cap *dev_cap,
                      struct mlx4_init_hca_param *init_hca);
 
-int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt);
-void mlx4_unmap_eq_icm(struct mlx4_dev *dev);
-
 int mlx4_cmd_init(struct mlx4_dev *dev);
 void mlx4_cmd_cleanup(struct mlx4_dev *dev);
 void mlx4_cmd_event(struct mlx4_dev *dev, u16 token, u8 status, u64 out_param);
index f96948be0a449248243e6f423708dc4b9e1c2b53..ca7ab8e7b4cc3917cc94599315f6d16f8e930150 100644 (file)
@@ -32,7 +32,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include <linux/mlx4/cmd.h>
index 26d1a7a9e375e55db351c29919b3187215e6c784..c4988d6bd5b2f38bc92aaa6284bfe255ae3ff559 100644 (file)
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 
 #include <asm/page.h>
index bd22df95adf9a51fa76632404bbf9488df833b5b..ca25b9dc837853e979650a3c5fc8b01f6a71e961 100644 (file)
@@ -32,8 +32,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
-
 #include "mlx4.h"
 #include "fw.h"
 
index 1c565ef8d179148ead7408457778dff904f9d439..42ab9fc01d3e1f04e40623f194903fbecc11568e 100644 (file)
@@ -33,8 +33,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
-
 #include <linux/mlx4/cmd.h>
 #include <linux/mlx4/qp.h>
 
index 3951b884c0fba94abebab3e254de14cdf99c63d2..e5741dab3825fb6a3f17ac6b87ead4203ec8064b 100644 (file)
@@ -31,7 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
 #include <linux/errno.h>
 #include <linux/pci.h>
 #include <linux/delay.h>
index fe9f218691f5f0d48306b3586efe87a4b47a9007..1377d0dc8f1f41d7de9f35d9a97bf49c0e0c433b 100644 (file)
@@ -31,8 +31,6 @@
  * SOFTWARE.
  */
 
-#include <linux/init.h>
-
 #include <linux/mlx4/cmd.h>
 
 #include "mlx4.h"
index 042d9bce9914550ad37ba68937195b6fdf4e1dd9..d0ab23a583558a5b66cb856e760015140a7e410f 100644 (file)
@@ -26,7 +26,7 @@ MODULE_VERSION(DRV_MODULE_VERSION);
 
 static void open_s3_dev(struct t3cdev *);
 static void close_s3_dev(struct t3cdev *);
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error);
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port);
 
 static cxgb3_cpl_handler_func cxgb3i_cpl_handlers[NUM_CPL_CMDS];
 static struct cxgb3_client t3c_client = {
@@ -34,7 +34,7 @@ static struct cxgb3_client t3c_client = {
        .handlers = cxgb3i_cpl_handlers,
        .add = open_s3_dev,
        .remove = close_s3_dev,
-       .err_handler = s3_err_handler,
+       .event_handler = s3_event_handler,
 };
 
 /**
@@ -66,16 +66,16 @@ static void close_s3_dev(struct t3cdev *t3dev)
        cxgb3i_ddp_cleanup(t3dev);
 }
 
-static void s3_err_handler(struct t3cdev *tdev, u32 status, u32 error)
+static void s3_event_handler(struct t3cdev *tdev, u32 event, u32 port)
 {
        struct cxgb3i_adapter *snic = cxgb3i_adapter_find_by_tdev(tdev);
 
-       cxgb3i_log_info("snic 0x%p, tdev 0x%p, status 0x%x, err 0x%x.\n",
-                       snic, tdev, status, error);
+       cxgb3i_log_info("snic 0x%p, tdev 0x%p, event 0x%x, port 0x%x.\n",
+                       snic, tdev, event, port);
        if (!snic)
                return;
 
-       switch (status) {
+       switch (event) {
        case OFFLOAD_STATUS_DOWN:
                snic->flags |= CXGB3I_ADAPTER_FLAG_RESET;
                break;