/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/dmapool.h>

#include "rds.h"
#include "ib.h"
/*
 * Convert IB-specific error message to RDS error message and call core
 * completion handler.
 */
static void rds_ib_send_complete(struct rds_message *rm, int wc_status,
				 void (*complete)(struct rds_message *rm, int status))
{
	int notify_status;

	switch (wc_status) {
	case IB_WC_WR_FLUSH_ERR:
		return;
	case IB_WC_SUCCESS:
		notify_status = RDS_RDMA_SUCCESS;
		break;
	case IB_WC_REM_ACCESS_ERR:
		notify_status = RDS_RDMA_REMOTE_ERROR;
		break;
	default:
		notify_status = RDS_RDMA_OTHER_ERROR;
		break;
	}
	complete(rm, notify_status);
}
static void rds_ib_send_unmap_rm(struct rds_ib_connection *ic,
				 struct rds_ib_send_work *send,
				 int wc_status)
{
	struct rds_message *rm = send->s_rm;

	rdsdebug("ic %p send %p rm %p\n", ic, send, rm);

	ib_dma_unmap_sg(ic->i_cm_id->device,
			rm->data.m_sg, rm->data.m_nents,
			DMA_TO_DEVICE);

	if (rm->rdma.m_rdma_op.r_active) {
		struct rds_rdma_op *op = &rm->rdma.m_rdma_op;

		ib_dma_unmap_sg(ic->i_cm_id->device,
				op->r_sg, op->r_nents,
				op->r_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
		/* If the user asked for a completion notification on this
		 * message, we can implement three different semantics:
		 *  1. Notify when we received the ACK on the RDS message
		 *     that was queued with the RDMA. This provides reliable
		 *     notification of RDMA status at the expense of a one-way
		 *     packet delay.
		 *  2. Notify when the IB stack gives us the completion event for
		 *     the RDMA operation.
		 *  3. Notify when the IB stack gives us the completion event for
		 *     the accompanying RDS message.
		 * Here, we implement approach #3. To implement approach #2,
		 * call rds_rdma_send_complete from the cq_handler. To implement #1,
		 * don't call rds_rdma_send_complete at all, and fall back to the notify
		 * handling in the ACK processing code.
		 *
		 * Note: There's no need to explicitly sync any RDMA buffers using
		 * ib_dma_sync_sg_for_cpu - the completion for the RDMA
		 * operation itself unmapped the RDMA buffers, which takes care
		 * of syncing.
		 */
		rds_ib_send_complete(rm, wc_status, rds_rdma_send_complete);

		if (rm->rdma.m_rdma_op.r_write)
			rds_stats_add(s_send_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
		else
			rds_stats_add(s_recv_rdma_bytes, rm->rdma.m_rdma_op.r_bytes);
	}
	if (rm->atomic.op_active) {
		struct rm_atomic_op *op = &rm->atomic;

		/* unmap atomic recvbuf */
		ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1,
				DMA_FROM_DEVICE);

		rds_ib_send_complete(rm, wc_status, rds_atomic_send_complete);

		if (rm->atomic.op_type == RDS_ATOMIC_TYPE_CSWP)
			rds_stats_inc(s_atomic_cswp);
		else
			rds_stats_inc(s_atomic_fadd);
	}

	/* If anyone waited for this message to get flushed out, wake
	 * them up now. */
	rds_message_unmapped(rm);
}
void rds_ib_send_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		struct ib_sge *sge;

		send->s_wr.wr_id = i;
		send->s_wr.sg_list = send->s_sge;
		send->s_wr.ex.imm_data = 0;

		sge = &send->s_sge[0];
		sge->addr = ic->i_send_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		send->s_sge[1].lkey = ic->i_mr->lkey;
	}
}
void rds_ib_send_clear_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_send_work *send;
	u32 i;

	for (i = 0, send = ic->i_sends; i < ic->i_send_ring.w_nr; i++, send++) {
		if (!send->s_rm || send->s_wr.opcode == 0xdead)
			continue;
		rds_ib_send_unmap_rm(ic, send, IB_WC_WR_FLUSH_ERR);
	}
}
/*
 * The _oldest/_free ring operations here race cleanly with the alloc/unalloc
 * operations performed in the send path. As the sender allocs and potentially
 * unallocs the next free entry in the ring it doesn't alter which is
 * the next to be freed, which is what this is concerned with.
 */
void rds_ib_send_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_wc wc;
	struct rds_ib_send_work *send;
	u32 completed;
	u32 oldest;
	u32 i = 0;
	int ret;

	rdsdebug("cq %p conn %p\n", cq, conn);
	rds_ib_stats_inc(s_ib_tx_cq_call);
	ret = ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);
	if (ret)
		rdsdebug("ib_req_notify_cq send failed: %d\n", ret);

	while (ib_poll_cq(cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_tx_cq_event);

		if (wc.wr_id == RDS_IB_ACK_WR_ID) {
			if (ic->i_ack_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			rds_ib_ack_send_complete(ic);
			continue;
		}

		oldest = rds_ib_ring_oldest(&ic->i_send_ring);

		completed = rds_ib_ring_completed(&ic->i_send_ring, wc.wr_id, oldest);

		for (i = 0; i < completed; i++) {
			send = &ic->i_sends[oldest];
			/* In the error case, wc.opcode sometimes contains garbage */
			switch (send->s_wr.opcode) {
			case IB_WR_SEND:
				if (send->s_rm)
					rds_ib_send_unmap_rm(ic, send, wc.status);
				break;
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_READ:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
			case IB_WR_ATOMIC_CMP_AND_SWP:
				/* Nothing to be done - the SG list will be unmapped
				 * when the SEND completes. */
				break;
			default:
				if (printk_ratelimit())
					printk(KERN_NOTICE
					       "RDS/IB: %s: unexpected opcode 0x%x in WR!\n",
					       __func__, send->s_wr.opcode);
				break;
			}

			send->s_wr.opcode = 0xdead;
			send->s_wr.num_sge = 1;
			if (send->s_queued + HZ/2 < jiffies)
				rds_ib_stats_inc(s_ib_tx_stalled);
			/* If an RDMA operation produced an error, signal this right
			 * away. If we don't, the subsequent SEND that goes with this
			 * RDMA will simply get flushed (IB_WC_WR_FLUSH_ERR) and the
			 * application never learns that the RDMA failed. */
			if (unlikely(wc.status == IB_WC_REM_ACCESS_ERR && send->s_op)) {
				struct rds_message *rm;

				rm = rds_send_get_message(conn, send->s_op);
				if (rm) {
					rds_ib_send_unmap_rm(ic, send, wc.status);
					rds_ib_send_complete(rm, wc.status, rds_rdma_send_complete);
					rds_message_put(rm);
				}
			}

			oldest = (oldest + 1) % ic->i_send_ring.w_nr;
		}

		rds_ib_ring_free(&ic->i_send_ring, completed);
		if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags) ||
		    test_bit(0, &conn->c_map_queued))
			queue_delayed_work(rds_wq, &conn->c_send_w, 0);

		/* We expect errors as the qp is drained during shutdown */
		if (wc.status != IB_WC_SUCCESS && rds_conn_up(conn)) {
			rds_ib_conn_error(conn,
				"send completion on %pI4 "
				"had status %u, disconnecting and reconnecting\n",
				&conn->c_faddr, wc.status);
		}
	}
}
/*
 * This is the main function for allocating credits when sending
 * messages.
 *
 * Conceptually, we have two counters:
 *  - send credits: this tells us how many WRs we're allowed
 *    to submit without overrunning the receiver's queue. For
 *    each SEND WR we post, we decrement this by one.
 *
 *  - posted credits: this tells us how many WRs we recently
 *    posted to the receive queue. This value is transferred
 *    to the peer as a "credit update" in an RDS header field.
 *    Every time we transmit credits to the peer, we subtract
 *    the amount of transferred credits from this counter.
 *
 * It is essential that we avoid situations where both sides have
 * exhausted their send credits, and are unable to send new credits
 * to the peer. We achieve this by requiring that we send at least
 * one credit update to the peer before exhausting our credits.
 * When new credits arrive, we subtract one credit that is withheld
 * until we've posted new buffers and are ready to transmit these
 * credits (see rds_ib_send_add_credits below).
 *
 * The RDS send code is essentially single-threaded; rds_send_xmit
 * grabs c_send_lock to ensure exclusive access to the send ring.
 * However, the ACK sending code is independent and can race with
 * this function.
 *
 * In the send path, we need to update the counters for send credits
 * and the counter of posted buffers atomically - when we use the
 * last available credit, we cannot allow another thread to race us
 * and grab the posted credits counter. Hence, we have to use a
 * spinlock to protect the credit counter, or use atomics.
 *
 * Spinlocks shared between the send and the receive path are bad,
 * because they create unnecessary delays. An early implementation
 * using a spinlock showed a 5% degradation in throughput at some
 * loads.
 *
 * This implementation avoids spinlocks completely, putting both
 * counters into a single atomic, and updating that atomic using
 * atomic_add (in the receive path, when receiving fresh credits),
 * and using atomic_cmpxchg when updating the two counters.
 */
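
/*
 * Editor's sketch (assumption, not part of the original file): both
 * counters live in the single atomic_t ic->i_credits.  Assuming send
 * credits sit in the low 16 bits and posted credits in the high 16
 * bits, the IB_GET_SEND_CREDITS()/IB_GET_POST_CREDITS() style helpers
 * used below would look roughly like the hypothetical macros here (the
 * real definitions live in the RDS IB header).  Packing both counts
 * into one word is what lets the grab path update them with a single
 * atomic_cmpxchg().
 */
#define EXAMPLE_IB_SET_SEND_CREDITS(v)	((v) & 0xffff)	/* low 16 bits */
#define EXAMPLE_IB_SET_POST_CREDITS(v)	((v) << 16)	/* high 16 bits */
#define EXAMPLE_IB_GET_SEND_CREDITS(v)	((v) & 0xffff)
#define EXAMPLE_IB_GET_POST_CREDITS(v)	((v) >> 16)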
int rds_ib_send_grab_credits(struct rds_ib_connection *ic,
			     u32 wanted, u32 *adv_credits, int need_posted, int max_posted)
{
	unsigned int avail, posted, got = 0, advertise;
	long oldval, newval;

try_again:
	advertise = 0;
	oldval = newval = atomic_read(&ic->i_credits);
	posted = IB_GET_POST_CREDITS(oldval);
	avail = IB_GET_SEND_CREDITS(oldval);

	rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n",
		 wanted, avail, posted);

	/* The last credit must be used to send a credit update. */
	if (avail && !posted)
		avail--;

	if (avail < wanted) {
		struct rds_connection *conn = ic->i_cm_id->context;

		/* Oops, there aren't that many credits left! */
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		got = avail;
	} else {
		/* Sometimes you get what you want, lalala. */
		got = wanted;
	}
	newval -= IB_SET_SEND_CREDITS(got);

	/*
	 * If need_posted is non-zero, then the caller wants
	 * the posted credits regardless of whether any send credits are
	 * available.
	 */
	if (posted && (got || need_posted)) {
		advertise = min_t(unsigned int, posted, max_posted);
		newval -= IB_SET_POST_CREDITS(advertise);
	}

	/* Finally bill everything */
	if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval)
		goto try_again;

	*adv_credits = advertise;
	return got;
}
void rds_ib_send_add_credits(struct rds_connection *conn, unsigned int credits)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (credits == 0)
		return;

	rdsdebug("rds_ib_send_add_credits(%u): current=%u%s\n",
		 credits,
		 IB_GET_SEND_CREDITS(atomic_read(&ic->i_credits)),
		 test_bit(RDS_LL_SEND_FULL, &conn->c_flags) ? ", ll_send_full" : "");

	atomic_add(IB_SET_SEND_CREDITS(credits), &ic->i_credits);
	if (test_and_clear_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);

	WARN_ON(IB_GET_SEND_CREDITS(credits) >= 16384);

	rds_ib_stats_inc(s_ib_rx_credit_updates);
}
void rds_ib_advertise_credits(struct rds_connection *conn, unsigned int posted)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	if (posted == 0)
		return;

	atomic_add(IB_SET_POST_CREDITS(posted), &ic->i_credits);

	/* Decide whether to send an update to the peer now.
	 * If we would send a credit update for every single buffer we
	 * post, we would end up with an ACK storm (ACK arrives,
	 * consumes buffer, we refill the ring, send ACK to remote
	 * advertising the newly posted buffer... ad infinitum)
	 *
	 * Performance pretty much depends on how often we send
	 * credit updates - too frequent updates mean lots of ACKs.
	 * Too infrequent updates, and the peer will run out of
	 * credits and have to throttle.
	 * For the time being, 16 seems to be a good compromise.
	 */
	if (IB_GET_POST_CREDITS(atomic_read(&ic->i_credits)) >= 16)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
}
/*
 * This can be called multiple times for a given message. The first time
 * we see a message we map its scatterlist into the IB device so that
 * we can provide that mapped address to the IB scatter gather entries
 * in the IB work requests. We translate the scatterlist into a series
 * of work requests that fragment the message. These work requests complete
 * in order so we pass ownership of the message to the completion handler
 * once we send the final fragment.
 *
 * The RDS core uses the c_send_lock to only enter this function once
 * per connection. This makes sure that the tx ring alloc/unalloc pairs
 * don't get out of sync and confuse the ring.
 */
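
/*
 * Editor's sketch (assumption, not part of the original file): the ceil()
 * helper used below is taken to be the usual round-up division from the
 * RDS headers, giving the number of RDS_FRAG_SIZE fragments (and thus
 * work requests) needed for a message.  A hypothetical equivalent:
 */
static inline u32 example_ceil(u32 x, u32 y)
{
	return (x + y - 1) / y;	/* e.g. example_ceil(8193, 4096) == 3 */
}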
int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm,
		unsigned int hdr_off, unsigned int sg, unsigned int off)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_device *dev = ic->i_cm_id->device;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct scatterlist *scat;
	u32 pos;
	u32 i;
	u32 work_alloc;
	u32 credit_alloc = 0;
	u32 posted;
	u32 adv_credits = 0;
	int send_flags = 0;
	int bytes_sent = 0;
	int ret;
	int flow_controlled = 0;

	BUG_ON(off % RDS_FRAG_SIZE);
	BUG_ON(hdr_off != 0 && hdr_off != sizeof(struct rds_header));

	/* Do not send cong updates to IB loopback */
	if (conn->c_loopback
	    && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) {
		rds_cong_map_updated(conn->c_fcong, ~(u64) 0);
		return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES;
	}
	/* FIXME we may overallocate here */
	if (be32_to_cpu(rm->m_inc.i_hdr.h_len) == 0)
		i = 1;
	else
		i = ceil(be32_to_cpu(rm->m_inc.i_hdr.h_len), RDS_FRAG_SIZE);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc == 0) {
		set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	if (ic->i_flowctl) {
		credit_alloc = rds_ib_send_grab_credits(ic, work_alloc, &posted, 0, RDS_MAX_ADV_CREDIT);
		adv_credits += posted;
		if (credit_alloc < work_alloc) {
			rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - credit_alloc);
			work_alloc = credit_alloc;
			flow_controlled++;
		}
		if (work_alloc == 0) {
			set_bit(RDS_LL_SEND_FULL, &conn->c_flags);
			rds_ib_stats_inc(s_ib_tx_throttle);
			ret = -ENOMEM;
			goto out;
		}
	}
	/* map the message the first time we see it */
	if (!ic->i_rm) {
		if (rm->data.m_nents) {
			rm->data.m_count = ib_dma_map_sg(dev,
							 rm->data.m_sg,
							 rm->data.m_nents,
							 DMA_TO_DEVICE);
			rdsdebug("ic %p mapping rm %p: %d\n", ic, rm, rm->data.m_count);
			if (rm->data.m_count == 0) {
				rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
				rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
				ret = -ENOMEM; /* XXX ? */
				goto out;
			}
		} else {
			rm->data.m_count = 0;
		}

		ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
		rds_message_addref(rm);
		ic->i_rm = rm;
	}
	/* Finalize the header */
	if (test_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags))
		rm->m_inc.i_hdr.h_flags |= RDS_FLAG_ACK_REQUIRED;
	if (test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags))
		rm->m_inc.i_hdr.h_flags |= RDS_FLAG_RETRANSMITTED;

	/* If it has an RDMA op, tell the peer we did it. This is
	 * used by the peer to release use-once RDMA MRs. */
	if (rm->rdma.m_rdma_op.r_active) {
		struct rds_ext_header_rdma ext_hdr;

		ext_hdr.h_rdma_rkey = cpu_to_be32(rm->rdma.m_rdma_op.r_key);
		rds_message_add_extension(&rm->m_inc.i_hdr,
					  RDS_EXTHDR_RDMA, &ext_hdr, sizeof(ext_hdr));
	}
	if (rm->m_rdma_cookie) {
		rds_message_add_rdma_dest_extension(&rm->m_inc.i_hdr,
						    rds_rdma_cookie_key(rm->m_rdma_cookie),
						    rds_rdma_cookie_offset(rm->m_rdma_cookie));
	}

	/* Note - rds_ib_piggyb_ack clears the ACK_REQUIRED bit, so
	 * we should not do this unless we have a chance of at least
	 * sticking the header into the send ring. Which is why we
	 * should call rds_ib_ring_alloc first. */
	rm->m_inc.i_hdr.h_ack = cpu_to_be64(rds_ib_piggyb_ack(ic));
	rds_message_make_checksum(&rm->m_inc.i_hdr);

	/*
	 * Update adv_credits since we reset the ACK_REQUIRED bit.
	 */
	if (ic->i_flowctl) {
		rds_ib_send_grab_credits(ic, 0, &posted, 1, RDS_MAX_ADV_CREDIT - adv_credits);
		adv_credits += posted;
		BUG_ON(adv_credits > 255);
	}
	/* Sometimes you want to put a fence between an RDMA
	 * READ and the following SEND.
	 * We could either do this all the time
	 * or when requested by the user. Right now, we let
	 * the application choose.
	 */
	if (rm->rdma.m_rdma_op.r_active && rm->rdma.m_rdma_op.r_fence)
		send_flags = IB_SEND_FENCE;

	/* Each frag gets a header. Msgs may be 0 bytes */
	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &rm->data.m_sg[sg];
	i = 0;
	do {
		unsigned int len = 0;
		/* Set up the header */
		send->s_wr.send_flags = send_flags;
		send->s_wr.opcode = IB_WR_SEND;
		send->s_wr.num_sge = 1;
		send->s_wr.next = NULL;
		send->s_queued = jiffies;
		send->s_op = NULL;

		send->s_sge[0].addr = ic->i_send_hdrs_dma
				      + (pos * sizeof(struct rds_header));
		send->s_sge[0].length = sizeof(struct rds_header);

		memcpy(&ic->i_send_hdrs[pos], &rm->m_inc.i_hdr, sizeof(struct rds_header));
		/* Set up the data, if present */
		if (i < work_alloc
		    && scat != &rm->data.m_sg[rm->data.m_count]) {
			len = min(RDS_FRAG_SIZE, ib_sg_dma_len(dev, scat) - off);
			send->s_wr.num_sge = 2;
			send->s_sge[1].addr = ib_sg_dma_address(dev, scat) + off;
			send->s_sge[1].length = len;
			bytes_sent += len;
			off += len;
			if (off == ib_sg_dma_len(dev, scat)) {
				scat++;
				off = 0;
			}
		}
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time
		 * on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		}

		/*
		 * Always signal the last one if we're stopping due to flow control.
		 */
		if (flow_controlled && i == (work_alloc-1))
			send->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);
		if (adv_credits) {
			struct rds_header *hdr = &ic->i_send_hdrs[pos];

			/* add credit and redo the header checksum */
			hdr->h_credit = adv_credits;
			rds_message_make_checksum(hdr);
			adv_credits = 0;
			rds_ib_stats_inc(s_ib_tx_credit_updates);
		}

		if (prev)
			prev->s_wr.next = &send->s_wr;
		prev = send;

		pos = (pos + 1) % ic->i_send_ring.w_nr;
		send = &ic->i_sends[pos];
		i++;

	} while (i < work_alloc
		 && scat != &rm->data.m_sg[rm->data.m_count]);
	/* Account the RDS header in the number of bytes we sent, but just once.
	 * The caller has no concept of fragmentation. */
	if (hdr_off == 0)
		bytes_sent += sizeof(struct rds_header);

	/* if we finished the message then send completion owns it */
	if (scat == &rm->data.m_sg[rm->data.m_count]) {
		prev->s_rm = ic->i_rm;
		prev->s_wr.send_flags |= IB_SEND_SIGNALED | IB_SEND_SOLICITED;
		ic->i_rm = NULL;
	}
	/* Put back wrs & credits we didn't use */
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}
	if (ic->i_flowctl && i < credit_alloc)
		rds_ib_send_add_credits(conn, credit_alloc - i);

	/* XXX need to worry about failed_wr and partial sends. */
	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		if (prev->s_rm) {
			ic->i_rm = prev->s_rm;
			prev->s_rm = NULL;
		}
		rds_ib_conn_error(ic->conn, "ib_post_send failed\n");
		goto out;
	}

	ret = bytes_sent;
out:
	return ret;
}
/*
 * Issue atomic operation.
 * A simplified version of the rdma case, we always map 1 SG, and
 * only 8 bytes, for the return value from the atomic operation.
 */
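
/*
 * Editor's sketch (assumption, not part of the original file): a
 * hypothetical mirror of the rm_atomic_op fields consumed below.  The
 * real definition lives in the RDS headers; the field types here are
 * assumptions.
 */
struct example_rm_atomic_op {
	int			op_type;	/* RDS_ATOMIC_TYPE_CSWP or the fetch-and-add type */
	u64			op_swap_add;	/* swap value (CSWP) or add operand (FADD) */
	u64			op_compare;	/* compare value, CSWP only */
	u64			op_remote_addr;	/* remote address the atomic targets */
	u32			op_rkey;	/* rkey covering the remote address */
	struct scatterlist	*op_sg;		/* 8-byte local buffer for the returned value */
	unsigned int		op_active;	/* set while the op is outstanding */
};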
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	u32 pos;
	u32 work_alloc;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos);
	if (work_alloc != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	/* address of send request in ring */
	send = &ic->i_sends[pos];
	send->s_queued = jiffies;
	if (op->op_type == RDS_ATOMIC_TYPE_CSWP) {
		send->s_wr.opcode = IB_WR_ATOMIC_CMP_AND_SWP;
		send->s_wr.wr.atomic.compare_add = op->op_compare;
		send->s_wr.wr.atomic.swap = op->op_swap_add;
	} else { /* fetch-and-add */
		send->s_wr.opcode = IB_WR_ATOMIC_FETCH_AND_ADD;
		send->s_wr.wr.atomic.compare_add = op->op_swap_add;
		send->s_wr.wr.atomic.swap = 0;
	}
	send->s_wr.send_flags = IB_SEND_SIGNALED;
	send->s_wr.num_sge = 1;
	send->s_wr.next = NULL;
	send->s_wr.wr.atomic.remote_addr = op->op_remote_addr;
	send->s_wr.wr.atomic.rkey = op->op_rkey;
	/* map 8 byte retval buffer to the device */
	ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE);
	rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret);
	if (ret != 1) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
		ret = -ENOMEM; /* XXX ? */
		goto out;
	}

	/* Convert our struct scatterlist to struct ib_sge */
	send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg);
	send->s_sge[0].lkey = ic->i_mr->lkey;

	rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr,
		 send->s_sge[0].addr, send->s_sge[0].length);
	failed_wr = &send->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr);
	rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic,
		 send, &send->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &send->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &send->s_wr)) {
		printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &send->s_wr);
	}

out:
	return ret;
}
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rds_rdma_op *op)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_send_work *send = NULL;
	struct rds_ib_send_work *first;
	struct rds_ib_send_work *prev;
	struct ib_send_wr *failed_wr;
	struct rds_ib_device *rds_ibdev;
	struct scatterlist *scat;
	unsigned long len;
	u64 remote_addr = op->r_remote_addr;
	u32 pos;
	u32 work_alloc;
	u32 i;
	u32 j;
	u32 num_sge;
	int sent;
	int ret;

	rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client);

	/* map the message the first time we see it */
	if (!op->r_mapped) {
		op->r_count = ib_dma_map_sg(ic->i_cm_id->device,
					    op->r_sg, op->r_nents, (op->r_write) ?
					    DMA_TO_DEVICE : DMA_FROM_DEVICE);
		rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->r_count);
		if (op->r_count == 0) {
			rds_ib_stats_inc(s_ib_tx_sg_mapping_failure);
			ret = -ENOMEM; /* XXX ? */
			goto out;
		}

		op->r_mapped = 1;
	}
	/*
	 * Instead of knowing how to return a partial rdma read/write we insist that there
	 * be enough work requests to send the entire message.
	 */
	i = ceil(op->r_count, rds_ibdev->max_sge);

	work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos);
	if (work_alloc != i) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		rds_ib_stats_inc(s_ib_tx_ring_full);
		ret = -ENOMEM;
		goto out;
	}

	send = &ic->i_sends[pos];
	first = send;
	prev = NULL;
	scat = &op->r_sg[0];
	sent = 0;
	num_sge = op->r_count;
	for (i = 0; i < work_alloc && scat != &op->r_sg[op->r_count]; i++) {
		send->s_wr.send_flags = 0;
		send->s_queued = jiffies;
		/*
		 * We want to delay signaling completions just enough to get
		 * the batching benefits but not so much that we create dead time on the wire.
		 */
		if (ic->i_unsignaled_wrs-- == 0) {
			ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs;
			send->s_wr.send_flags = IB_SEND_SIGNALED;
		}

		send->s_wr.opcode = op->r_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ;
		send->s_wr.wr.rdma.remote_addr = remote_addr;
		send->s_wr.wr.rdma.rkey = op->r_key;
		send->s_op = op;

		if (num_sge > rds_ibdev->max_sge) {
			send->s_wr.num_sge = rds_ibdev->max_sge;
			num_sge -= rds_ibdev->max_sge;
		} else {
			send->s_wr.num_sge = num_sge;
		}

		send->s_wr.next = NULL;

		if (prev)
			prev->s_wr.next = &send->s_wr;
		for (j = 0; j < send->s_wr.num_sge && scat != &op->r_sg[op->r_count]; j++) {
			len = ib_sg_dma_len(ic->i_cm_id->device, scat);
			send->s_sge[j].addr =
				ib_sg_dma_address(ic->i_cm_id->device, scat);
			send->s_sge[j].length = len;
			send->s_sge[j].lkey = ic->i_mr->lkey;

			sent += len;
			rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr);

			remote_addr += len;
			scat++;
		}

		rdsdebug("send %p wr %p num_sge %u next %p\n", send,
			 &send->s_wr, send->s_wr.num_sge, send->s_wr.next);

		prev = send;
		if (++send == &ic->i_sends[ic->i_send_ring.w_nr])
			send = ic->i_sends;
	}

	/* if we finished the message then send completion owns it */
	if (scat == &op->r_sg[op->r_count])
		prev->s_wr.send_flags = IB_SEND_SIGNALED;
	if (i < work_alloc) {
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i);
		work_alloc = i;
	}

	failed_wr = &first->s_wr;
	ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr);
	rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic,
		 first, &first->s_wr, ret, failed_wr);
	BUG_ON(failed_wr != &first->s_wr);
	if (ret) {
		printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 "
		       "returned %d\n", &conn->c_faddr, ret);
		rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc);
		goto out;
	}

	if (unlikely(failed_wr != &first->s_wr)) {
		printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret);
		BUG_ON(failed_wr != &first->s_wr);
	}

out:
	return ret;
}
void rds_ib_xmit_complete(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;

	/* We may have a pending ACK or window update we were unable
	 * to send previously (due to flow control). Try again. */
	rds_ib_attempt_ack(ic);
}