IB/ipath: Add barrier before updating WC head in shared memory
Author:     Ralph Campbell <ralph.campbell@qlogic.com>
AuthorDate: Fri, 6 Jul 2007 19:48:23 +0000 (12:48 -0700)
Commit:     Roland Dreier <rolandd@cisco.com>
CommitDate: Tue, 10 Jul 2007 03:12:26 +0000 (20:12 -0700)
Add a memory barrier to make sure the CPU doesn't reorder writes to
shared memory, since user programs can be polling on the head index
update, and the queue entry must be written before the head index
that publishes it.

Signed-off-by: Ralph Campbell <ralph.campbell@qlogic.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
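
The pattern the patch enforces is a classic single-producer/single-consumer
ring buffer: the producer must make the queue entry visible before
publishing the new head index, and the consumer must read the head index
before reading the entry it covers. Below is a minimal userspace sketch of
that pairing, using C11 release/acquire atomics in place of the kernel's
smp_wmb()/smp_rmb(); the struct and field names are illustrative, not the
actual ipath layout.

/*
 * Minimal single-producer/single-consumer sketch of the ordering this
 * patch enforces, using C11 release/acquire atomics in place of the
 * kernel's smp_wmb()/smp_rmb().  Names are illustrative, not the real
 * ipath layout.
 */
#include <stdatomic.h>
#include <stdint.h>

#define QSIZE 64

struct wc_entry {
	uint32_t qp_num;
	uint32_t opcode;
};

struct wc_queue {
	_Atomic uint32_t head;		/* written by the producer only */
	_Atomic uint32_t tail;		/* written by the consumer only */
	struct wc_entry queue[QSIZE];
};

/* Producer: fill in the entry, then publish it by advancing head. */
static int wc_enter(struct wc_queue *wc, const struct wc_entry *e)
{
	uint32_t head = atomic_load_explicit(&wc->head, memory_order_relaxed);
	uint32_t next = (head + 1) % QSIZE;

	if (next == atomic_load_explicit(&wc->tail, memory_order_acquire))
		return 0;		/* queue full, entry dropped */

	wc->queue[head] = *e;
	/*
	 * The release store plays the role of smp_wmb(): the entry written
	 * above is visible to other CPUs before the new head value is.
	 */
	atomic_store_explicit(&wc->head, next, memory_order_release);
	return 1;
}

/* Consumer: observe head, then read the entry it covers. */
static int wc_poll(struct wc_queue *wc, struct wc_entry *e)
{
	uint32_t tail = atomic_load_explicit(&wc->tail, memory_order_relaxed);

	/*
	 * The acquire load plays the role of smp_rmb(): the entry read
	 * below cannot be reordered before this load of head.
	 */
	if (tail == atomic_load_explicit(&wc->head, memory_order_acquire))
		return 0;		/* queue empty */

	*e = wc->queue[tail];
	atomic_store_explicit(&wc->tail, (tail + 1) % QSIZE,
			      memory_order_release);
	return 1;
}

Without the release/acquire pairing (or the explicit barriers this patch
adds), the consumer could observe the new head value while still reading a
stale, partially written entry.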
drivers/infiniband/hw/ipath/ipath_cq.c
drivers/infiniband/hw/ipath/ipath_ruc.c
drivers/infiniband/hw/ipath/ipath_srq.c
drivers/infiniband/hw/ipath/ipath_ud.c
drivers/infiniband/hw/ipath/ipath_verbs.c

diff --git a/drivers/infiniband/hw/ipath/ipath_cq.c b/drivers/infiniband/hw/ipath/ipath_cq.c
index 9014ef63eedc607d07d0321ea0e48385c6ddcb93..a6f04d27ec576c021061df4d13d8f18e3fde61e1 100644
--- a/drivers/infiniband/hw/ipath/ipath_cq.c
+++ b/drivers/infiniband/hw/ipath/ipath_cq.c
@@ -90,6 +90,8 @@ void ipath_cq_enter(struct ipath_cq *cq, struct ib_wc *entry, int solicited)
        wc->queue[head].sl = entry->sl;
        wc->queue[head].dlid_path_bits = entry->dlid_path_bits;
        wc->queue[head].port_num = entry->port_num;
+       /* Make sure queue entry is written before the head index. */
+       smp_wmb();
        wc->head = next;
 
        if (cq->notify == IB_CQ_NEXT_COMP ||
@@ -139,7 +141,8 @@ int ipath_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *entry)
 
                if (tail == wc->head)
                        break;
-
+               /* Make sure entry is read after head index is read. */
+               smp_rmb();
                qp = ipath_lookup_qpn(&to_idev(cq->ibcq.device)->qp_table,
                                      wc->queue[tail].qp_num);
                entry->qp = &qp->ibqp;
diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
index 854deb56ac02c02b33ccfe45a0561d9669fc5958..85256747d8a185223da6aaf4eab5dd8287d04908 100644
--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
@@ -194,6 +194,8 @@ int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
                        ret = 0;
                        goto bail;
                }
+               /* Make sure entry is read after head index is read. */
+               smp_rmb();
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_srq.c b/drivers/infiniband/hw/ipath/ipath_srq.c
index 14cbbd633d34990a26d3eb853416f78ca7dce0b2..40c36ec190167080784383f457dff4c541a21f71 100644
--- a/drivers/infiniband/hw/ipath/ipath_srq.c
+++ b/drivers/infiniband/hw/ipath/ipath_srq.c
@@ -80,6 +80,8 @@ int ipath_post_srq_receive(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
+               /* Make sure queue entry is written before the head index. */
+               smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&srq->rq.lock, flags);
        }
diff --git a/drivers/infiniband/hw/ipath/ipath_ud.c b/drivers/infiniband/hw/ipath/ipath_ud.c
index 38ba771b3efe402e9771c20454f3d647f0eae532..f9a3338a5fb7aa6e90b57c44e88c3b55fe8de87f 100644
--- a/drivers/infiniband/hw/ipath/ipath_ud.c
+++ b/drivers/infiniband/hw/ipath/ipath_ud.c
@@ -176,6 +176,8 @@ static void ipath_ud_loopback(struct ipath_qp *sqp,
                        dev->n_pkt_drops++;
                        goto bail_sge;
                }
+               /* Make sure entry is read after head index is read. */
+               smp_rmb();
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
                        tail = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_verbs.c b/drivers/infiniband/hw/ipath/ipath_verbs.c
index 0aecded6af869668ea066cbd574deee1ab489d36..c76ea0e0b02418a71193a87bf41857010977a58a 100644
--- a/drivers/infiniband/hw/ipath/ipath_verbs.c
+++ b/drivers/infiniband/hw/ipath/ipath_verbs.c
@@ -327,6 +327,8 @@ static int ipath_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                wqe->num_sge = wr->num_sge;
                for (i = 0; i < wr->num_sge; i++)
                        wqe->sg_list[i] = wr->sg_list[i];
+               /* Make sure queue entry is written before the head index. */
+               smp_wmb();
                wq->head = next;
                spin_unlock_irqrestore(&qp->r_rq.lock, flags);
        }