/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <rdma/rdma_cm.h>

#include "rds.h"
#include "ib.h"
static struct kmem_cache *rds_ib_incoming_slab;
static struct kmem_cache *rds_ib_frag_slab;
static atomic_t rds_ib_allocation = ATOMIC_INIT(0);
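/*
 * rds_ib_allocation tracks how many rds_ib_incoming structs are live across
 * all connections: rds_ib_recv_refill_one() refuses to allocate past the
 * rds_ib_sysctl_max_recv_allocation cap, and rds_ib_inc_free() drops the
 * count when an incoming message is released.
 */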
/* Free frag and attached recv buffer f_sg */
static void rds_ib_frag_free(struct rds_page_frag *frag)
{
	rdsdebug("frag %p page %p\n", frag, sg_page(&frag->f_sg));
	__free_page(sg_page(&frag->f_sg));
	kmem_cache_free(rds_ib_frag_slab, frag);
}
void rds_ib_recv_init_ring(struct rds_ib_connection *ic)
{
	struct rds_ib_recv_work *recv;
	u32 i;

	for (i = 0, recv = ic->i_recvs; i < ic->i_recv_ring.w_nr; i++, recv++) {
		struct ib_sge *sge;

		recv->r_ibinc = NULL;
		recv->r_frag = NULL;

		recv->r_wr.next = NULL;
		recv->r_wr.wr_id = i;
		recv->r_wr.sg_list = recv->r_sge;
		recv->r_wr.num_sge = RDS_IB_RECV_SGE;

		sge = &recv->r_sge[0];
		sge->addr = ic->i_recv_hdrs_dma + (i * sizeof(struct rds_header));
		sge->length = sizeof(struct rds_header);
		sge->lkey = ic->i_mr->lkey;

		sge = &recv->r_sge[1];
		sge->addr = 0;
		sge->length = RDS_FRAG_SIZE;
		sge->lkey = ic->i_mr->lkey;
	}
}
static void rds_ib_recv_clear_one(struct rds_ib_connection *ic,
				  struct rds_ib_recv_work *recv)
{
	if (recv->r_ibinc) {
		rds_inc_put(&recv->r_ibinc->ii_inc);
		recv->r_ibinc = NULL;
	}
	if (recv->r_frag) {
		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);
		rds_ib_frag_free(recv->r_frag);
		recv->r_frag = NULL;
	}
}
void rds_ib_recv_clear_ring(struct rds_ib_connection *ic)
{
	u32 i;

	for (i = 0; i < ic->i_recv_ring.w_nr; i++)
		rds_ib_recv_clear_one(ic, &ic->i_recvs[i]);
}
static int rds_ib_recv_refill_one(struct rds_connection *conn,
				  struct rds_ib_recv_work *recv)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct ib_sge *sge;
	int ret = -ENOMEM;

	/*
	 * ibinc was taken from recv if recv contained the start of a message.
	 * recvs that were continuations will still have this allocated.
	 */
	if (!recv->r_ibinc) {
		if (!atomic_add_unless(&rds_ib_allocation, 1, rds_ib_sysctl_max_recv_allocation)) {
			rds_ib_stats_inc(s_ib_rx_alloc_limit);
			goto out;
		}
		recv->r_ibinc = kmem_cache_alloc(rds_ib_incoming_slab, GFP_NOWAIT);
		if (!recv->r_ibinc) {
			atomic_dec(&rds_ib_allocation);
			goto out;
		}
		INIT_LIST_HEAD(&recv->r_ibinc->ii_frags);
		rds_inc_init(&recv->r_ibinc->ii_inc, conn, conn->c_faddr);
	}

	WARN_ON(recv->r_frag); /* leak! */
	recv->r_frag = kmem_cache_alloc(rds_ib_frag_slab, GFP_NOWAIT);
	if (!recv->r_frag)
		goto out;
	INIT_LIST_HEAD(&recv->r_frag->f_item);
	sg_init_table(&recv->r_frag->f_sg, 1);
	ret = rds_page_remainder_alloc(&recv->r_frag->f_sg,
				       RDS_FRAG_SIZE, GFP_NOWAIT);
	if (ret) {
		kmem_cache_free(rds_ib_frag_slab, recv->r_frag);
		recv->r_frag = NULL;
		goto out;
	}

	ret = ib_dma_map_sg(ic->i_cm_id->device, &recv->r_frag->f_sg,
			    1, DMA_FROM_DEVICE);
	WARN_ON(ret != 1);

	sge = &recv->r_sge[0];
	sge->addr = ic->i_recv_hdrs_dma + (recv - ic->i_recvs) * sizeof(struct rds_header);
	sge->length = sizeof(struct rds_header);

	sge = &recv->r_sge[1];
	sge->addr = sg_dma_address(&recv->r_frag->f_sg);
	sge->length = sg_dma_len(&recv->r_frag->f_sg);

	ret = 0;
out:
	return ret;
}
/*
 * This tries to allocate and post unused work requests after making sure that
 * they have all the allocations they need to queue received fragments into
 * sockets.  The i_recv_mutex is held here so that ring_alloc and _unalloc
 * pairs don't go unmatched.
 *
 * -1 is returned if posting fails due to temporary resource exhaustion.
 */
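/*
 * The prefill flag lets the ring be filled while the connection is still
 * being set up, before rds_conn_up() becomes true.
 */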
int rds_ib_recv_refill(struct rds_connection *conn, int prefill)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_recv_work *recv;
	struct ib_recv_wr *failed_wr;
	unsigned int posted = 0;
	int ret = 0;
	u32 pos;

	while ((prefill || rds_conn_up(conn)) &&
	       rds_ib_ring_alloc(&ic->i_recv_ring, 1, &pos)) {
		if (pos >= ic->i_recv_ring.w_nr) {
			printk(KERN_NOTICE "Argh - ring alloc returned pos=%u\n",
					pos);
			ret = -EINVAL;
			break;
		}

		recv = &ic->i_recvs[pos];
		ret = rds_ib_recv_refill_one(conn, recv);
		if (ret) {
			ret = -1;
			break;
		}

		/* XXX when can this fail? */
		ret = ib_post_recv(ic->i_cm_id->qp, &recv->r_wr, &failed_wr);
		rdsdebug("recv %p ibinc %p page %p addr %lu ret %d\n", recv,
			 recv->r_ibinc, sg_page(&recv->r_frag->f_sg),
			 (long) sg_dma_address(&recv->r_frag->f_sg), ret);
		if (ret) {
			rds_ib_conn_error(conn, "recv post on "
			       "%pI4 returned %d, disconnecting and "
			       "reconnecting\n", &conn->c_faddr,
			       ret);
			ret = -1;
			break;
		}

		posted++;
	}

	/* We're doing flow control - update the window. */
	if (ic->i_flowctl && posted)
		rds_ib_advertise_credits(conn, posted);

	if (ret)
		rds_ib_ring_unalloc(&ic->i_recv_ring, 1);
	return ret;
}
static void rds_ib_inc_purge(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct rds_page_frag *pos;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	rdsdebug("purging ibinc %p inc %p\n", ibinc, inc);

	list_for_each_entry_safe(frag, pos, &ibinc->ii_frags, f_item) {
		list_del_init(&frag->f_item);
		rds_ib_frag_free(frag);
	}
}
void rds_ib_inc_free(struct rds_incoming *inc)
{
	struct rds_ib_incoming *ibinc;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);

	rds_ib_inc_purge(inc);
	rdsdebug("freeing ibinc %p inc %p\n", ibinc, inc);
	BUG_ON(!list_empty(&ibinc->ii_frags));
	kmem_cache_free(rds_ib_incoming_slab, ibinc);
	atomic_dec(&rds_ib_allocation);
	BUG_ON(atomic_read(&rds_ib_allocation) < 0);
}
int rds_ib_inc_copy_to_user(struct rds_incoming *inc, struct iovec *first_iov,
			    size_t size)
{
	struct rds_ib_incoming *ibinc;
	struct rds_page_frag *frag;
	struct iovec *iov = first_iov;
	unsigned long to_copy;
	unsigned long frag_off = 0;
	unsigned long iov_off = 0;
	int copied = 0;
	int ret;
	u32 len;

	ibinc = container_of(inc, struct rds_ib_incoming, ii_inc);
	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	len = be32_to_cpu(inc->i_hdr.h_len);

	while (copied < size && copied < len) {
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
		while (iov_off == iov->iov_len) {
			iov_off = 0;
			iov++;
		}

		to_copy = min(iov->iov_len - iov_off, RDS_FRAG_SIZE - frag_off);
		to_copy = min_t(size_t, to_copy, size - copied);
		to_copy = min_t(unsigned long, to_copy, len - copied);

		rdsdebug("%lu bytes to user [%p, %zu] + %lu from frag "
			 "[%p, %u] + %lu\n",
			 to_copy, iov->iov_base, iov->iov_len, iov_off,
			 sg_page(&frag->f_sg), frag->f_sg.offset, frag_off);

		/* XXX needs + offset for multiple recvs per page */
		ret = rds_page_copy_to_user(sg_page(&frag->f_sg),
					    frag->f_sg.offset + frag_off,
					    iov->iov_base + iov_off,
					    to_copy);
		if (ret) {
			/* rds_page_copy_to_user() returns -EFAULT on failure;
			 * propagate that instead of a partial byte count. */
			copied = ret;
			break;
		}

		iov_off += to_copy;
		frag_off += to_copy;
		copied += to_copy;
	}

	return copied;
}
/* ic starts out kzalloc()ed */
void rds_ib_recv_init_ack(struct rds_ib_connection *ic)
{
	struct ib_send_wr *wr = &ic->i_ack_wr;
	struct ib_sge *sge = &ic->i_ack_sge;

	sge->addr = ic->i_ack_dma;
	sge->length = sizeof(struct rds_header);
	sge->lkey = ic->i_mr->lkey;

	wr->sg_list = sge;
	wr->num_sge = 1;
	wr->opcode = IB_WR_SEND;
	wr->wr_id = RDS_IB_ACK_WR_ID;
	wr->send_flags = IB_SEND_SIGNALED | IB_SEND_SOLICITED;
}
/*
 * You'd think that with reliable IB connections you wouldn't need to ack
 * messages that have been received.  The problem is that IB hardware generates
 * an ack message before it has DMAed the message into memory.  This creates a
 * potential message loss if the HCA is disabled for any reason between when it
 * sends the ack and before the message is DMAed and processed.  This is only a
 * potential issue if another HCA is available for fail-over.
 *
 * When the remote host receives our ack they'll free the sent message from
 * their send queue.  To decrease the latency of this we always send an ack
 * immediately after we've received messages.
 *
 * For simplicity, we only have one ack in flight at a time.  This puts
 * pressure on senders to have deep enough send queues to absorb the latency of
 * a single ack frame being in flight.  This might not be good enough.
 *
 * This is implemented by having a long-lived send_wr and sge which point to a
 * statically allocated ack frame.  This ack wr does not fall under the ring
 * accounting that the tx and rx wrs do.  The QP attribute specifically makes
 * room for it beyond the ring size.  Send completion notices its special
 * wr_id and avoids working with the ring in that case.
 */
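/*
 * Two implementations of the ack-sequence accessors follow: a spinlocked one
 * for platforms without 64-bit atomics, and an atomic64_t one that only needs
 * memory barriers to order the flag update against the sequence store.
 */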
#ifndef KERNEL_HAS_ATOMIC64
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	unsigned long flags;

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	ic->i_ack_next = seq;
	if (ack_required)
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	unsigned long flags;
	u64 seq;

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

	spin_lock_irqsave(&ic->i_ack_lock, flags);
	seq = ic->i_ack_next;
	spin_unlock_irqrestore(&ic->i_ack_lock, flags);

	return seq;
}
#else
static void rds_ib_set_ack(struct rds_ib_connection *ic, u64 seq,
			   int ack_required)
{
	atomic64_set(&ic->i_ack_next, seq);
	if (ack_required) {
		smp_mb__before_clear_bit();
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	}
}

static u64 rds_ib_get_ack(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	smp_mb__after_clear_bit();

	return atomic64_read(&ic->i_ack_next);
}
#endif
static void rds_ib_send_ack(struct rds_ib_connection *ic, unsigned int adv_credits)
{
	struct rds_header *hdr = ic->i_ack;
	struct ib_send_wr *failed_wr;
	u64 seq;
	int ret;

	seq = rds_ib_get_ack(ic);

	rdsdebug("send_ack: ic %p ack %llu\n", ic, (unsigned long long) seq);
	rds_message_populate_header(hdr, 0, 0, 0);
	hdr->h_ack = cpu_to_be64(seq);
	hdr->h_credit = adv_credits;
	rds_message_make_checksum(hdr);
	ic->i_ack_queued = jiffies;

	ret = ib_post_send(ic->i_cm_id->qp, &ic->i_ack_wr, &failed_wr);
	if (unlikely(ret)) {
		/* Failed to send. Release the WR, and
		 * force another ACK.
		 */
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		set_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);

		rds_ib_stats_inc(s_ib_ack_send_failure);

		rds_ib_conn_error(ic->conn, "sending ack failed\n");
	} else
		rds_ib_stats_inc(s_ib_ack_sent);
}
/*
 * There are 3 ways of getting acknowledgements to the peer:
 *  1.	We call rds_ib_attempt_ack from the recv completion handler
 *	to send an ACK-only frame.
 *	However, there can be only one such frame in the send queue
 *	at any time, so we may have to postpone it.
 *  2.	When another (data) packet is transmitted while there's
 *	an ACK in the queue, we piggyback the ACK sequence number
 *	on the data packet.
 *  3.	If the ACK WR is done sending, we get called from the
 *	send queue completion handler, and check whether there's
 *	another ACK pending (postponed because the WR was on the
 *	queue). If so, we transmit it.
 *
 * We maintain 2 variables:
 *  -	i_ack_flags, which keeps track of whether the ACK WR
 *	is currently in the send queue or not (IB_ACK_IN_FLIGHT)
 *  -	i_ack_next, which is the last sequence number we received
 *
 * Potentially, send queue and receive queue handlers can run concurrently.
 * It would be nice to not have to use a spinlock to synchronize things,
 * but the one problem that rules this out is that 64bit updates are
 * not atomic on all platforms. Things would be a lot simpler if
 * we had atomic64 or maybe cmpxchg64 everywhere.
 *
 * Reconnecting complicates this picture just slightly. When we
 * reconnect, we may be seeing duplicate packets. The peer
 * is retransmitting them, because it hasn't seen an ACK for
 * them. It is important that we ACK these.
 *
 * ACK mitigation adds a header flag "ACK_REQUIRED"; any packet with
 * this flag set *MUST* be acknowledged immediately.
 */
/*
 * When we get here, we're called from the recv queue handler.
 * Check whether we ought to transmit an ACK.
 */
void rds_ib_attempt_ack(struct rds_ib_connection *ic)
{
	unsigned int adv_credits;

	if (!test_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		return;

	if (test_and_set_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags)) {
		rds_ib_stats_inc(s_ib_ack_send_delayed);
		return;
	}

	/* Can we get a send credit? */
	if (!rds_ib_send_grab_credits(ic, 1, &adv_credits, 0, RDS_MAX_ADV_CREDIT)) {
		rds_ib_stats_inc(s_ib_tx_throttle);
		clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
		return;
	}

	clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags);
	rds_ib_send_ack(ic, adv_credits);
}
/*
 * We get here from the send completion handler, when the
 * adapter tells us the ACK frame was sent.
 */
void rds_ib_ack_send_complete(struct rds_ib_connection *ic)
{
	clear_bit(IB_ACK_IN_FLIGHT, &ic->i_ack_flags);
	rds_ib_attempt_ack(ic);
}
/*
 * This is called by the regular xmit code when it wants to piggyback
 * an ACK on an outgoing frame.
 */
u64 rds_ib_piggyb_ack(struct rds_ib_connection *ic)
{
	if (test_and_clear_bit(IB_ACK_REQUESTED, &ic->i_ack_flags))
		rds_ib_stats_inc(s_ib_ack_send_piggybacked);
	return rds_ib_get_ack(ic);
}
/*
 * It's kind of lame that we're copying from the posted receive pages into
 * long-lived bitmaps.  We could have posted the bitmaps and rdma written into
 * them.  But receiving new congestion bitmaps should be a *rare* event, so
 * hopefully we won't need to invest that complexity in making it more
 * efficient.  By copying we can share a simpler core with TCP which has to
 * copy to/from userspace anyway.
 */
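/*
 * The peer's map arrives as RDS_CONG_MAP_BYTES of little-endian bitmap data
 * split across RDS_FRAG_SIZE fragments; the copy loop below advances through
 * the fragment list and the map's backing pages in lock step.
 */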
static void rds_ib_cong_recv(struct rds_connection *conn,
			     struct rds_ib_incoming *ibinc)
{
	struct rds_cong_map *map;
	unsigned int map_off;
	unsigned int map_page;
	struct rds_page_frag *frag;
	unsigned long frag_off;
	unsigned long to_copy;
	unsigned long copied;
	uint64_t uncongested = 0;
	void *addr;

	/* catch completely corrupt packets */
	if (be32_to_cpu(ibinc->ii_inc.i_hdr.h_len) != RDS_CONG_MAP_BYTES)
		return;

	map = conn->c_fcong;
	map_page = 0;
	map_off = 0;

	frag = list_entry(ibinc->ii_frags.next, struct rds_page_frag, f_item);
	frag_off = 0;

	copied = 0;

	while (copied < RDS_CONG_MAP_BYTES) {
		uint64_t *src, *dst;
		unsigned int k;

		to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
		BUG_ON(to_copy & 7); /* Must be 64bit aligned. */

		addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0);

		src = addr + frag_off;
		dst = (void *)map->m_page_addrs[map_page] + map_off;
		for (k = 0; k < to_copy; k += 8) {
			/* Record ports that became uncongested, ie
			 * bits that changed from 0 to 1. */
			uncongested |= ~(*src) & *dst;
			*dst++ = *src++;
		}
		kunmap_atomic(addr, KM_SOFTIRQ0);

		copied += to_copy;

		map_off += to_copy;
		if (map_off == PAGE_SIZE) {
			map_off = 0;
			map_page++;
		}

		frag_off += to_copy;
		if (frag_off == RDS_FRAG_SIZE) {
			frag = list_entry(frag->f_item.next,
					  struct rds_page_frag, f_item);
			frag_off = 0;
		}
	}

	/* the congestion map is in little endian order */
	uncongested = le64_to_cpu(uncongested);

	rds_cong_map_updated(map, uncongested);
}
/*
 * Rings are posted with all the allocations they'll need to queue the
 * incoming message to the receiving socket so this can't fail.
 * All fragments start with a header, so we can make sure we're not receiving
 * garbage, and we can tell a small 8 byte fragment from an ACK frame.
 */
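/*
 * Ack state is accumulated across a CQ poll pass and applied once by the
 * recv tasklet, rather than once per completion.
 */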
struct rds_ib_ack_state {
	u64		ack_next;
	u64		ack_recv;
	unsigned int	ack_required:1;
	unsigned int	ack_next_valid:1;
	unsigned int	ack_recv_valid:1;
};
static void rds_ib_process_recv(struct rds_connection *conn,
				struct rds_ib_recv_work *recv, u32 data_len,
				struct rds_ib_ack_state *state)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	struct rds_ib_incoming *ibinc = ic->i_ibinc;
	struct rds_header *ihdr, *hdr;

	/* XXX shut down the connection if port 0,0 are seen? */

	rdsdebug("ic %p ibinc %p recv %p byte len %u\n", ic, ibinc, recv,
		 data_len);

	if (data_len < sizeof(struct rds_header)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 didn't include a "
		       "header, disconnecting and "
		       "reconnecting\n",
		       &conn->c_faddr);
		return;
	}
	data_len -= sizeof(struct rds_header);

	ihdr = &ic->i_recv_hdrs[recv - ic->i_recvs];

	/* Validate the checksum. */
	if (!rds_message_verify_checksum(ihdr)) {
		rds_ib_conn_error(conn, "incoming message "
		       "from %pI4 has corrupted header - "
		       "forcing a reconnect\n",
		       &conn->c_faddr);
		rds_stats_inc(s_recv_drop_bad_checksum);
		return;
	}

	/* Process the ACK sequence which comes with every packet */
	state->ack_recv = be64_to_cpu(ihdr->h_ack);
	state->ack_recv_valid = 1;

	/* Process the credits update if there was one */
	if (ihdr->h_credit)
		rds_ib_send_add_credits(conn, ihdr->h_credit);

	if (ihdr->h_sport == 0 && ihdr->h_dport == 0 && data_len == 0) {
		/* This is an ACK-only packet. The fact that it gets
		 * special treatment here is that historically, ACKs
		 * were rather special beasts.
		 */
		rds_ib_stats_inc(s_ib_ack_received);

		/*
		 * Usually the frags make their way on to incs and are then freed as
		 * the inc is freed.  We don't go that route, so we have to drop the
		 * page ref ourselves.  We can't just leave the page on the recv
		 * because that confuses the dma mapping of pages and each recv's use
		 * of a partial page.
		 *
		 * FIXME: Fold this into the code path below.
		 */
		rds_ib_frag_free(recv->r_frag);
		recv->r_frag = NULL;
		return;
	}

	/*
	 * If we don't already have an inc on the connection then this
	 * fragment has a header and starts a message.. copy its header
	 * into the inc and save the inc so we can hang upcoming fragments
	 * off its list.
	 */
	if (!ibinc) {
		ibinc = recv->r_ibinc;
		recv->r_ibinc = NULL;
		ic->i_ibinc = ibinc;

		hdr = &ibinc->ii_inc.i_hdr;
		memcpy(hdr, ihdr, sizeof(*hdr));
		ic->i_recv_data_rem = be32_to_cpu(hdr->h_len);

		rdsdebug("ic %p ibinc %p rem %u flag 0x%x\n", ic, ibinc,
			 ic->i_recv_data_rem, hdr->h_flags);
	} else {
		hdr = &ibinc->ii_inc.i_hdr;
		/* We can't just use memcmp here; fragments of a
		 * single message may carry different ACKs */
		if (hdr->h_sequence != ihdr->h_sequence ||
		    hdr->h_len != ihdr->h_len ||
		    hdr->h_sport != ihdr->h_sport ||
		    hdr->h_dport != ihdr->h_dport) {
			rds_ib_conn_error(conn,
				"fragment header mismatch; forcing reconnect\n");
			return;
		}
	}

	list_add_tail(&recv->r_frag->f_item, &ibinc->ii_frags);
	recv->r_frag = NULL;

	if (ic->i_recv_data_rem > RDS_FRAG_SIZE)
		ic->i_recv_data_rem -= RDS_FRAG_SIZE;
	else {
		ic->i_recv_data_rem = 0;
		ic->i_ibinc = NULL;

		if (ibinc->ii_inc.i_hdr.h_flags == RDS_FLAG_CONG_BITMAP)
			rds_ib_cong_recv(conn, ibinc);
		else {
			rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
					  &ibinc->ii_inc, GFP_ATOMIC,
					  KM_SOFTIRQ0);
			state->ack_next = be64_to_cpu(hdr->h_sequence);
			state->ack_next_valid = 1;
		}

		/* Evaluate the ACK_REQUIRED flag *after* we received
		 * the complete frame, and after bumping the next_rx
		 * sequence. */
		if (hdr->h_flags & RDS_FLAG_ACK_REQUIRED) {
			rds_stats_inc(s_recv_ack_required);
			state->ack_required = 1;
		}

		rds_inc_put(&ibinc->ii_inc);
	}
}
/*
 * Plucking the oldest entry from the ring can be done concurrently with
 * the thread refilling the ring.  Each ring operation is protected by
 * spinlocks and the transient state of refilling doesn't change the
 * recording of which entry is oldest.
 *
 * This relies on IB only calling one cq comp_handler for each cq so that
 * there will only be one caller of rds_recv_incoming() per RDS connection.
 */
void rds_ib_recv_cq_comp_handler(struct ib_cq *cq, void *context)
{
	struct rds_connection *conn = context;
	struct rds_ib_connection *ic = conn->c_transport_data;

	rdsdebug("conn %p cq %p\n", conn, cq);

	rds_ib_stats_inc(s_ib_rx_cq_call);

	tasklet_schedule(&ic->i_recv_tasklet);
}
static inline void rds_poll_cq(struct rds_ib_connection *ic,
			       struct rds_ib_ack_state *state)
{
	struct rds_connection *conn = ic->conn;
	struct ib_wc wc;
	struct rds_ib_recv_work *recv;

	while (ib_poll_cq(ic->i_recv_cq, 1, &wc) > 0) {
		rdsdebug("wc wr_id 0x%llx status %u byte_len %u imm_data %u\n",
			 (unsigned long long)wc.wr_id, wc.status, wc.byte_len,
			 be32_to_cpu(wc.ex.imm_data));
		rds_ib_stats_inc(s_ib_rx_cq_event);

		recv = &ic->i_recvs[rds_ib_ring_oldest(&ic->i_recv_ring)];

		ib_dma_unmap_sg(ic->i_cm_id->device, &recv->r_frag->f_sg, 1, DMA_FROM_DEVICE);

		/*
		 * Also process recvs in connecting state because it is possible
		 * to get a recv completion _before_ the rdmacm ESTABLISHED
		 * event is processed.
		 */
		if (rds_conn_up(conn) || rds_conn_connecting(conn)) {
			/* We expect errors as the qp is drained during shutdown */
			if (wc.status == IB_WC_SUCCESS) {
				rds_ib_process_recv(conn, recv, wc.byte_len, state);
			} else {
				rds_ib_conn_error(conn, "recv completion on "
				       "%pI4 had status %u, disconnecting and "
				       "reconnecting\n", &conn->c_faddr,
				       wc.status);
			}
		}

		rds_ib_ring_free(&ic->i_recv_ring, 1);
	}
}
void rds_ib_recv_tasklet_fn(unsigned long data)
{
	struct rds_ib_connection *ic = (struct rds_ib_connection *) data;
	struct rds_connection *conn = ic->conn;
	struct rds_ib_ack_state state = { 0, };
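	/* Poll the CQ, re-arm the completion notification, then poll again:
	 * the second pass catches completions that arrived between the first
	 * drain and the re-arm, which would otherwise raise no interrupt. */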
	rds_poll_cq(ic, &state);
	ib_req_notify_cq(ic->i_recv_cq, IB_CQ_SOLICITED);
	rds_poll_cq(ic, &state);

	if (state.ack_next_valid)
		rds_ib_set_ack(ic, state.ack_next, state.ack_required);
	if (state.ack_recv_valid && state.ack_recv > ic->i_ack_recv) {
		rds_send_drop_acked(conn, state.ack_recv, NULL);
		ic->i_ack_recv = state.ack_recv;
	}
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	/* If we ever end up with a really empty receive ring, we're
	 * in deep trouble, as the sender will definitely see RNR
	 * timeouts. */
	if (rds_ib_ring_empty(&ic->i_recv_ring))
		rds_ib_stats_inc(s_ib_rx_ring_empty);

	if (rds_ib_ring_low(&ic->i_recv_ring))
		rds_ib_recv_refill(conn, 0);
}
int rds_ib_recv(struct rds_connection *conn)
{
	struct rds_ib_connection *ic = conn->c_transport_data;
	int ret = 0;

	rdsdebug("conn %p\n", conn);
	if (rds_conn_up(conn))
		rds_ib_attempt_ack(ic);

	return ret;
}
int __init rds_ib_recv_init(void)
{
	struct sysinfo si;
	int ret = -ENOMEM;

	/* Default to 30% of all available RAM for recv memory */
	si_meminfo(&si);
	rds_ib_sysctl_max_recv_allocation = si.totalram / 3 * PAGE_SIZE / RDS_FRAG_SIZE;
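	/* si.totalram is in pages, so the cap works out to the number of
	 * RDS_FRAG_SIZE fragments that fit in a third of physical memory. */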
	rds_ib_incoming_slab = kmem_cache_create("rds_ib_incoming",
					sizeof(struct rds_ib_incoming),
					0, 0, NULL);
	if (!rds_ib_incoming_slab)
		goto out;

	rds_ib_frag_slab = kmem_cache_create("rds_ib_frag",
					sizeof(struct rds_page_frag),
					0, 0, NULL);
	if (!rds_ib_frag_slab)
		kmem_cache_destroy(rds_ib_incoming_slab);
	else
		ret = 0;
out:
	return ret;
}
void rds_ib_recv_exit(void)
{
	kmem_cache_destroy(rds_ib_incoming_slab);
	kmem_cache_destroy(rds_ib_frag_slab);
}