/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"
/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU.  Otherwise the softlockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit.  Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, "batch factor when working the send queue");
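
/*
 * Usage note: the 0444 mode above makes the parameter read-only at runtime,
 * so it can only be set when the module is loaded, e.g. (assuming RDS is
 * built as the rds.ko module rather than into the kernel):
 *
 *	modprobe rds send_batch_count=128
 *
 * The current value can be read back from
 * /sys/module/rds/parameters/send_batch_count.
 */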
/*
 * Reset the send state.  Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;

	if (conn->c_xmit_rm) {
		/* Tell the user the RDMA op is no longer mapped by the
		 * transport. This isn't entirely true (it's flushed out
		 * independently) but as the connection is down, there's
		 * no ongoing RDMA to/from that memory */
		rds_message_unmapped(conn->c_xmit_rm);
		rds_message_put(conn->c_xmit_rm);
		conn->c_xmit_rm = NULL;
	}

	conn->c_xmit_sg = 0;
	conn->c_xmit_hdr_off = 0;
	conn->c_xmit_data_off = 0;
	conn->c_xmit_rdma_sent = 0;
	conn->c_xmit_atomic_sent = 0;

	conn->c_map_queued = 0;

	conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
	conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

	/* Mark messages as retransmissions, and move them to the send q */
	spin_lock_irqsave(&conn->c_lock, flags);
	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
		set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
	}
	list_splice_init(&conn->c_retrans, &conn->c_send_queue);
	spin_unlock_irqrestore(&conn->c_lock, flags);
}
/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
	struct rds_message *rm;
	unsigned long flags;
	unsigned int tmp;
	unsigned int send_quota = send_batch_count;
	struct scatterlist *sg;
	int ret = 0;
	int was_empty = 0;
	LIST_HEAD(to_be_dropped);

	/*
	 * sendmsg calls here after having queued its message on the send
	 * queue.  We only have one task feeding the connection at a time.  If
	 * another thread is already feeding the queue then we back off.  This
	 * avoids blocking the caller and trading per-connection data between
	 * caches per message.
	 *
	 * The sem holder will issue a retry if they notice that someone queued
	 * a message after they stopped walking the send queue but before they
	 * dropped the sem.
	 */
	if (!mutex_trylock(&conn->c_send_lock)) {
		rds_stats_inc(s_send_sem_contention);
		ret = -ENOMEM;
		goto out;
	}

	if (conn->c_trans->xmit_prepare)
		conn->c_trans->xmit_prepare(conn);

	/*
	 * spin trying to push headers and data down the connection until
	 * the connection doesn't make forward progress.
	 */
	while (--send_quota) {
		/*
		 * See if we need to send a congestion map update if we're
		 * between sending messages.  The send_sem protects our sole
		 * use of c_map_offset and _bytes.
		 * Note this is used only by transports that define a special
		 * xmit_cong_map function.  For all others, we allocate
		 * a cong_map message and treat it just like any other send.
		 */
		if (conn->c_map_bytes) {
			ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
							   conn->c_map_offset);
			if (ret <= 0)
				break;

			conn->c_map_offset += ret;
			conn->c_map_bytes -= ret;
			if (conn->c_map_bytes)
				continue;
		}

		/* If we're done sending the current message, clear the
		 * offset and S/G temporaries.
		 */
		rm = conn->c_xmit_rm;
		if (rm &&
		    conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
		    conn->c_xmit_sg == rm->data.m_nents) {
			conn->c_xmit_rm = NULL;
			conn->c_xmit_sg = 0;
			conn->c_xmit_hdr_off = 0;
			conn->c_xmit_data_off = 0;
			conn->c_xmit_rdma_sent = 0;
			conn->c_xmit_atomic_sent = 0;

			/* Release the reference to the previous message. */
			rds_message_put(rm);
			rm = NULL;
		}

		/* If we're asked to send a cong map update, do so.
		 */
		if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
			if (conn->c_trans->xmit_cong_map) {
				conn->c_map_offset = 0;
				conn->c_map_bytes = sizeof(struct rds_header) +
						    RDS_CONG_MAP_BYTES;
				continue;
			}

			rm = rds_cong_update_alloc(conn);
			if (IS_ERR(rm)) {
				ret = PTR_ERR(rm);
				break;
			}

			conn->c_xmit_rm = rm;
		}
		/*
		 * Grab the next message from the send queue, if there is one.
		 *
		 * c_xmit_rm holds a ref while we're sending this message down
		 * the connection.  We can use this ref while holding the
		 * send_sem.. rds_send_reset() is serialized with it.
		 */
		if (!rm) {
			unsigned int len;

			spin_lock_irqsave(&conn->c_lock, flags);

			if (!list_empty(&conn->c_send_queue)) {
				rm = list_entry(conn->c_send_queue.next,
						struct rds_message,
						m_conn_item);
				rds_message_addref(rm);

				/*
				 * Move the message from the send queue to the
				 * retransmit list right away.
				 */
				list_move_tail(&rm->m_conn_item, &conn->c_retrans);
			}

			spin_unlock_irqrestore(&conn->c_lock, flags);

			if (!rm) {
				was_empty = 1;
				break;
			}

			/* Unfortunately, the way Infiniband deals with
			 * RDMA to a bad MR key is by moving the entire
			 * queue pair to error state. We could possibly
			 * recover from that, but right now we drop the
			 * connection.
			 * Therefore, we never retransmit messages with RDMA ops.
			 */
			if (rm->rdma.m_rdma_op.r_active &&
			    test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
				spin_lock_irqsave(&conn->c_lock, flags);
				if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
					list_move(&rm->m_conn_item, &to_be_dropped);
				spin_unlock_irqrestore(&conn->c_lock, flags);
				rds_message_put(rm);
				continue;
			}

			/* Require an ACK every once in a while */
			len = ntohl(rm->m_inc.i_hdr.h_len);
			if (conn->c_unacked_packets == 0 ||
			    conn->c_unacked_bytes < len) {
				__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

				conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
				conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
				rds_stats_inc(s_send_ack_required);
			} else {
				conn->c_unacked_bytes -= len;
				conn->c_unacked_packets--;
			}

			conn->c_xmit_rm = rm;
		}
		if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
			ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
			if (ret)
				break;
			conn->c_xmit_atomic_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		/*
		 * Try and send an rdma message.  Let's see if we can
		 * keep this simple and require that the transport either
		 * send the whole rdma or none of it.
		 */
		if (rm->rdma.m_rdma_op.r_active && !conn->c_xmit_rdma_sent) {
			ret = conn->c_trans->xmit_rdma(conn, &rm->rdma.m_rdma_op);
			if (ret)
				break;
			conn->c_xmit_rdma_sent = 1;
			/* The transport owns the mapped memory for now.
			 * You can't unmap it while it's on the send queue */
			set_bit(RDS_MSG_MAPPED, &rm->m_flags);
		}

		if (conn->c_xmit_hdr_off < sizeof(struct rds_header) ||
		    conn->c_xmit_sg < rm->data.m_nents) {
			ret = conn->c_trans->xmit(conn, rm,
						  conn->c_xmit_hdr_off,
						  conn->c_xmit_sg,
						  conn->c_xmit_data_off);
			if (ret <= 0)
				break;

			if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
				tmp = min_t(int, ret,
					    sizeof(struct rds_header) -
					    conn->c_xmit_hdr_off);
				conn->c_xmit_hdr_off += tmp;
				ret -= tmp;
			}

			sg = &rm->data.m_sg[conn->c_xmit_sg];
			while (ret) {
				tmp = min_t(int, ret, sg->length -
						      conn->c_xmit_data_off);
				conn->c_xmit_data_off += tmp;
				ret -= tmp;
				if (conn->c_xmit_data_off == sg->length) {
					conn->c_xmit_data_off = 0;
					sg++;
					conn->c_xmit_sg++;
					BUG_ON(ret != 0 &&
					       conn->c_xmit_sg == rm->data.m_nents);
				}
			}
		}
	}
	/* Nuke any messages we decided not to retransmit. */
	if (!list_empty(&to_be_dropped))
		rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

	if (conn->c_trans->xmit_complete)
		conn->c_trans->xmit_complete(conn);

	/*
	 * We might be racing with another sender who queued a message but
	 * backed off on noticing that we held the c_send_lock.  If we check
	 * for queued messages after dropping the sem then either we'll
	 * see the queued message or the queuer will get the sem.  If we
	 * notice the queued message then we trigger an immediate retry.
	 *
	 * We need to be careful only to do this when we stopped processing
	 * the send queue because it was empty.  It's the only way we
	 * stop processing the loop when the transport hasn't taken
	 * responsibility for forward progress.
	 */
	mutex_unlock(&conn->c_send_lock);

	if (conn->c_map_bytes || (send_quota == 0 && !was_empty)) {
		/* We exhausted the send quota, but there's work left to
		 * do.  Return and (re-)schedule the send worker.
		 */
		ret = -EAGAIN;
	}

	if (ret == 0 && was_empty) {
		/* A simple bit test would be way faster than taking the
		 * spin lock */
		spin_lock_irqsave(&conn->c_lock, flags);
		if (!list_empty(&conn->c_send_queue)) {
			rds_stats_inc(s_send_sem_queue_raced);
			ret = -EAGAIN;
		}
		spin_unlock_irqrestore(&conn->c_lock, flags);
	}
out:
	return ret;
}
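
/*
 * Subtract a message's payload bytes from the socket's send-buffer
 * accounting (rs_snd_bytes).  Callers are expected to hold rs->rs_lock,
 * as documented by the assert_spin_locked() below.
 */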
static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
	u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	assert_spin_locked(&rs->rs_lock);

	BUG_ON(rs->rs_snd_bytes < len);
	rs->rs_snd_bytes -= len;

	if (rs->rs_snd_bytes == 0)
		rds_stats_inc(s_send_queue_empty);
}
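
/*
 * Test whether a message has been acknowledged.  Transports that track
 * acknowledgement themselves (TCP, for instance) supply an is_acked
 * callback; otherwise we fall back to comparing the message's sequence
 * number against the acked sequence number.
 */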
static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
				    is_acked_func is_acked)
{
	if (is_acked)
		return is_acked(rm, ack);
	return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}

/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number greater than or equal to the given sequence
 * number.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
	struct rds_message *rm, *tmp;
	int ret = 1;

	spin_lock(&conn->c_lock);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
			ret = 0;
		break;
	}

	spin_unlock(&conn->c_lock);

	return ret;
}
/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rds_rdma_op *ro;
	struct rds_notifier *notifier;
	unsigned long flags;

	spin_lock_irqsave(&rm->m_rs_lock, flags);

	ro = &rm->rdma.m_rdma_op;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
	    ro->r_active && ro->r_notify && ro->r_notifier) {
		notifier = ro->r_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ro->r_notifier = NULL;
	}

	spin_unlock_irqrestore(&rm->m_rs_lock, flags);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
/*
 * Just like above, except looks at the atomic op.
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
	struct rds_sock *rs = NULL;
	struct rm_atomic_op *ao;
	struct rds_notifier *notifier;

	spin_lock(&rm->m_rs_lock);

	ao = &rm->atomic;
	if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
	    && ao->op_active && ao->op_notify && ao->op_notifier) {
		notifier = ao->op_notifier;
		rs = rm->m_rs;
		sock_hold(rds_rs_to_sk(rs));

		notifier->n_status = status;
		spin_lock(&rs->rs_lock);
		list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
		spin_unlock(&rs->rs_lock);

		ao->op_notifier = NULL;
	}

	spin_unlock(&rm->m_rs_lock);

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_rdma_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
	struct rds_rdma_op *ro;

	ro = &rm->rdma.m_rdma_op;
	if (ro->r_active && ro->r_notify && ro->r_notifier) {
		ro->r_notifier->n_status = status;
		list_add_tail(&ro->r_notifier->n_list, &rs->rs_notify_queue);
		ro->r_notifier = NULL;
	}

	/* No need to wake the app - caller does this */
}
/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with a remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
					 struct rds_rdma_op *op)
{
	struct rds_message *rm, *tmp, *found = NULL;
	unsigned long flags;

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (&rm->rdma.m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			goto out;
		}
	}

	list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
		if (&rm->rdma.m_rdma_op == op) {
			atomic_inc(&rm->m_refcount);
			found = rm;
			break;
		}
	}

out:
	spin_unlock_irqrestore(&conn->c_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);
/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
	unsigned long flags;
	struct rds_sock *rs = NULL;
	struct rds_message *rm;

	while (!list_empty(messages)) {
		rm = list_entry(messages->next, struct rds_message,
				m_sock_item);
		list_del_init(&rm->m_conn_item);

		/*
		 * If we see this flag cleared then we're *sure* that someone
		 * else beat us to removing it from the sock.  If we race
		 * with their flag update we'll get the lock and then really
		 * see that the flag has been cleared.
		 *
		 * The message spinlock makes sure nobody clears rm->m_rs
		 * while we're messing with it.  It does not prevent the
		 * message from being removed from the socket, though.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);
		if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
			goto unlock_and_drop;

		if (rs != rm->m_rs) {
			if (rs) {
				rds_wake_sk_sleep(rs);
				sock_put(rds_rs_to_sk(rs));
			}
			rs = rm->m_rs;
			sock_hold(rds_rs_to_sk(rs));
		}
		spin_lock(&rs->rs_lock);

		if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
			struct rds_rdma_op *ro = &rm->rdma.m_rdma_op;
			struct rds_notifier *notifier;

			list_del_init(&rm->m_sock_item);
			rds_send_sndbuf_remove(rs, rm);

			if (ro->r_active && ro->r_notifier &&
			    (ro->r_notify || (ro->r_recverr && status))) {
				notifier = ro->r_notifier;
				list_add_tail(&notifier->n_list,
					      &rs->rs_notify_queue);
				if (!notifier->n_status)
					notifier->n_status = status;
				rm->rdma.m_rdma_op.r_notifier = NULL;
			}
			rds_message_put(rm);
			rm->m_rs = NULL;
		}
		spin_unlock(&rs->rs_lock);

unlock_and_drop:
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
		rds_message_put(rm);
	}

	if (rs) {
		rds_wake_sk_sleep(rs);
		sock_put(rds_rs_to_sk(rs));
	}
}
/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue.  This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
			 is_acked_func is_acked)
{
	struct rds_message *rm, *tmp;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&conn->c_lock, flags);

	list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
		if (!rds_send_is_acked(rm, ack, is_acked))
			break;

		list_move(&rm->m_conn_item, &list);
		clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	}

	/* order flag updates with spin locks */
	if (!list_empty(&list))
		smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&conn->c_lock, flags);

	/* now remove the messages from the sock list as needed */
	rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);
void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
	struct rds_message *rm, *tmp;
	struct rds_connection *conn;
	unsigned long flags;
	LIST_HEAD(list);

	/* get all the messages we're dropping under the rs lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
		if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
			     dest->sin_port != rm->m_inc.i_hdr.h_dport))
			continue;

		list_move(&rm->m_sock_item, &list);
		rds_send_sndbuf_remove(rs, rm);
		clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
	}

	/* order flag updates with the rs lock */
	smp_mb__after_clear_bit();

	spin_unlock_irqrestore(&rs->rs_lock, flags);

	if (list_empty(&list))
		return;

	/* Remove the messages from the conn */
	list_for_each_entry(rm, &list, m_sock_item) {

		conn = rm->m_inc.i_conn;

		spin_lock_irqsave(&conn->c_lock, flags);
		/*
		 * Maybe someone else beat us to removing rm from the conn.
		 * If we race with their flag update we'll get the lock and
		 * then really see that the flag has been cleared.
		 */
		if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
			spin_unlock_irqrestore(&conn->c_lock, flags);
			continue;
		}
		list_del_init(&rm->m_conn_item);
		spin_unlock_irqrestore(&conn->c_lock, flags);

		/*
		 * Couldn't grab m_rs_lock in top loop (lock ordering),
		 * but we can now.
		 */
		spin_lock_irqsave(&rm->m_rs_lock, flags);

		spin_lock(&rs->rs_lock);
		__rds_rdma_send_complete(rs, rm, RDS_RDMA_CANCELED);
		spin_unlock(&rs->rs_lock);

		rm->m_rs = NULL;
		spin_unlock_irqrestore(&rm->m_rs_lock, flags);
	}

	rds_wake_sk_sleep(rs);

	while (!list_empty(&list)) {
		rm = list_entry(list.next, struct rds_message, m_sock_item);
		list_del_init(&rm->m_sock_item);

		rds_message_wait(rm);
		rds_message_put(rm);
	}
}
/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
			     struct rds_message *rm, __be16 sport,
			     __be16 dport, int *queued)
{
	unsigned long flags;
	u32 len;

	if (*queued)
		goto out;

	len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

	/* this is the only place which holds both the socket's rs_lock
	 * and the connection's c_lock */
	spin_lock_irqsave(&rs->rs_lock, flags);

	/*
	 * If there is a little space in sndbuf, we don't queue anything,
	 * and userspace gets -EAGAIN. But poll() indicates there's send
	 * room. This can lead to bad behavior (spinning) if snd_bytes isn't
	 * freed up by incoming acks. So we check the *old* value of
	 * rs_snd_bytes here to allow the last msg to exceed the buffer,
	 * and poll() now knows no more data can be sent.
	 */
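	/*
	 * Illustration (made-up numbers): suppose rds_sk_sndbuf() reports a
	 * 64KB limit and rs_snd_bytes is currently 60KB.  A 100KB message is
	 * still queued because the *old* value (60KB) is below the limit;
	 * rs_snd_bytes then becomes 160KB, so both this check and poll()
	 * agree that nothing more can be queued until acks free up space.
	 */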
	if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
		rs->rs_snd_bytes += len;

		/* let recv side know we are close to send space exhaustion.
		 * This is probably not the optimal way to do it, as this
		 * means we set the flag on *all* messages as soon as our
		 * throughput hits a certain threshold.
		 */
		if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
			__set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

		list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
		set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
		rds_message_addref(rm);
		rm->m_rs = rs;

		/* The code ordering is a little weird, but we're
		   trying to minimize the time we hold c_lock */
		rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
		rm->m_inc.i_conn = conn;
		rds_message_addref(rm);

		spin_lock(&conn->c_lock);
		rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
		list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
		set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
		spin_unlock(&conn->c_lock);

		rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
			 rm, len, rs, rs->rs_snd_bytes,
			 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

		*queued = 1;
	}

	spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
	return *queued;
}
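
/*
 * Caller note: rds_sendmsg() below loops on rds_send_queue_rm(), sleeping
 * in wait_event_interruptible_timeout() until the message fits in the send
 * buffer ('queued' becomes nonzero) or the timeout expires.
 */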
/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go.  This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
	struct cmsghdr *cmsg;
	int size = 0;
	int retval;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
			if (retval < 0)
				return retval;
			size += retval;
			break;

		case RDS_CMSG_RDMA_DEST:
		case RDS_CMSG_RDMA_MAP:
			/* these are valid but do not add any size */
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			size += sizeof(struct scatterlist);
			break;

		default:
			return -EINVAL;
		}
	}

	size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

	return size;
}
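
/*
 * Worked example (illustrative sizes): on a system with 4KB pages, an
 * RDS_CMSG_ATOMIC_FADD cmsg plus an 8KB payload makes rds_rm_size() reserve
 * room for three scatterlist entries - one for the atomic op and
 * ceil(8KB, 4KB) = 2 for the payload pages.
 */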
static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
			 struct msghdr *msg, int *allocated_mr)
{
	struct cmsghdr *cmsg;
	int ret = 0;

	for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
		if (!CMSG_OK(msg, cmsg))
			return -EINVAL;

		if (cmsg->cmsg_level != SOL_RDS)
			continue;

		/* As a side effect, RDMA_DEST and RDMA_MAP will set
		 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
		 */
		switch (cmsg->cmsg_type) {
		case RDS_CMSG_RDMA_ARGS:
			ret = rds_cmsg_rdma_args(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_DEST:
			ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
			break;

		case RDS_CMSG_RDMA_MAP:
			ret = rds_cmsg_rdma_map(rs, rm, cmsg);
			if (!ret)
				*allocated_mr = 1;
			break;

		case RDS_CMSG_ATOMIC_CSWP:
		case RDS_CMSG_ATOMIC_FADD:
			ret = rds_cmsg_atomic(rs, rm, cmsg);
			break;

		default:
			return -EINVAL;
		}

		if (ret)
			break;
	}

	return ret;
}
int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
		size_t payload_len)
{
	struct sock *sk = sock->sk;
	struct rds_sock *rs = rds_sk_to_rs(sk);
	struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
	__be32 daddr;
	__be16 dport;
	struct rds_message *rm = NULL;
	struct rds_connection *conn;
	int ret = 0;
	int queued = 0, allocated_mr = 0;
	int nonblock = msg->msg_flags & MSG_DONTWAIT;
	long timeo = sock_sndtimeo(sk, nonblock);

	/* Mirror how Linux UDP mirrors BSD's error message compatibility */
	/* XXX: Perhaps MSG_MORE someday */
	if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
		printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (msg->msg_namelen) {
		/* XXX fail non-unicast destination IPs? */
		if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
			ret = -EINVAL;
			goto out;
		}
		daddr = usin->sin_addr.s_addr;
		dport = usin->sin_port;
	} else {
		/* We only care about consistency with ->connect() */
		lock_sock(sk);
		daddr = rs->rs_conn_addr;
		dport = rs->rs_conn_port;
		release_sock(sk);
	}

	/* racing with another thread binding seems ok here */
	if (daddr == 0 || rs->rs_bound_addr == 0) {
		ret = -ENOTCONN; /* XXX not a great errno */
		goto out;
	}

	/* size of rm including all sgs */
	ret = rds_rm_size(msg, payload_len);
	if (ret < 0)
		goto out;

	rm = rds_message_alloc(ret, GFP_KERNEL);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->data.m_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
	/* XXX fix this to not allocate memory */
	ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
	if (ret)
		goto out;
	/* rds_conn_create has a spinlock that runs with IRQ off.
	 * Caching the conn in the socket helps a lot. */
	if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
		conn = rs->rs_conn;
	else {
		conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
						rs->rs_transport,
						sock->sk->sk_allocation);
		if (IS_ERR(conn)) {
			ret = PTR_ERR(conn);
			goto out;
		}
		rs->rs_conn = conn;
	}

	/* Parse any control messages the user may have included. */
	ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
	if (ret)
		goto out;

	if ((rm->m_rdma_cookie || rm->rdma.m_rdma_op.r_active) &&
	    !conn->c_trans->xmit_rdma) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
			       &rm->rdma.m_rdma_op, conn->c_trans->xmit_rdma);
		ret = -EOPNOTSUPP;
		goto out;
	}

	if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
		if (printk_ratelimit())
			printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
			       &rm->atomic, conn->c_trans->xmit_atomic);
		ret = -EOPNOTSUPP;
		goto out;
	}

	/* If the connection is down, trigger a connect.  We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
	if (ret) {
		rs->rs_seen_congestion = 1;
		goto out;
	}

	while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
				  dport, &queued)) {
		rds_stats_inc(s_send_queue_full);
		/* XXX make sure this is reasonable */
		if (payload_len > rds_sk_sndbuf(rs)) {
			ret = -EMSGSIZE;
			goto out;
		}
		if (nonblock) {
			ret = -EAGAIN;
			goto out;
		}

		timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
					rds_send_queue_rm(rs, conn, rm,
							  rs->rs_bound_port,
							  dport,
							  &queued),
					timeo);
		rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
		if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
			continue;

		ret = timeo;
		if (ret == 0)
			ret = -ETIMEDOUT;
		goto out;
	}

	/*
	 * By now we've committed to the send.  We reuse rds_send_worker()
	 * to retry sends in the rds thread if the transport asks us to.
	 */
	rds_stats_inc(s_send_queued);

	if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
		rds_send_worker(&conn->c_send_w.work);

	rds_message_put(rm);
	return payload_len;

out:
	/* If the user included a RDMA_MAP cmsg, we allocated an MR on the fly.
	 * If the sendmsg goes through, we keep the MR.  If it fails with EAGAIN
	 * or in any other way, we need to destroy the MR again */
	if (allocated_mr)
		rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);

	if (rm)
		rds_message_put(rm);
	return ret;
}
/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
	struct rds_message *rm;
	unsigned long flags;
	int ret = 0;

	rm = rds_message_alloc(0, GFP_ATOMIC);
	if (!rm) {
		ret = -ENOMEM;
		goto out;
	}

	rm->m_daddr = conn->c_faddr;

	/* If the connection is down, trigger a connect.  We may
	 * have scheduled a delayed reconnect however - in this case
	 * we should not interfere.
	 */
	if (rds_conn_state(conn) == RDS_CONN_DOWN &&
	    !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
		queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

	ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
	if (ret)
		goto out;

	spin_lock_irqsave(&conn->c_lock, flags);
	list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
	set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
	rds_message_addref(rm);
	rm->m_inc.i_conn = conn;

	rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
				    conn->c_next_tx_seq);
	conn->c_next_tx_seq++;
	spin_unlock_irqrestore(&conn->c_lock, flags);

	rds_stats_inc(s_send_queued);
	rds_stats_inc(s_send_pong);

	queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	rds_message_put(rm);
	return 0;

out:
	if (rm)
		rds_message_put(rm);
	return ret;
}