net/rds/send.c  (net-next-2.6.git, blame)
RDS: Add flag for silent ops. Do atomic op before RDMA
1/*
2 * Copyright (c) 2006 Oracle. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 *
32 */
33#include <linux/kernel.h>
 34#include <linux/gfp.h>
35#include <net/sock.h>
36#include <linux/in.h>
37#include <linux/list.h>
38
39#include "rds.h"
40
41/* When transmitting messages in rds_send_xmit, we need to emerge from
 42 * time to time and briefly release the CPU. Otherwise the soft lockup watchdog
43 * will kick our shin.
44 * Also, it seems fairer to not let one busy connection stall all the
45 * others.
46 *
47 * send_batch_count is the number of times we'll loop in send_xmit. Setting
48 * it to 0 will restore the old behavior (where we looped until we had
49 * drained the queue).
50 */
51static int send_batch_count = 64;
52module_param(send_batch_count, int, 0444);
53MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
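/* Illustrative usage: the parameter is 0444, i.e. read-only at runtime, so it
 * can only be set at module load time, e.g. something like
 * "modprobe rds send_batch_count=128" (assuming the core module is named rds);
 * 0 restores the drain-the-queue behaviour described above. */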
54
55/*
56 * Reset the send state. Caller must hold c_send_lock when calling here.
57 */
58void rds_send_reset(struct rds_connection *conn)
59{
60 struct rds_message *rm, *tmp;
61 unsigned long flags;
62
63 if (conn->c_xmit_rm) {
64 /* Tell the user the RDMA op is no longer mapped by the
65 * transport. This isn't entirely true (it's flushed out
66 * independently) but as the connection is down, there's
67 * no ongoing RDMA to/from that memory */
68 rds_message_unmapped(conn->c_xmit_rm);
69 rds_message_put(conn->c_xmit_rm);
70 conn->c_xmit_rm = NULL;
71 }
72 conn->c_xmit_sg = 0;
73 conn->c_xmit_hdr_off = 0;
74 conn->c_xmit_data_off = 0;
 75 conn->c_xmit_atomic_sent = 0;
 76 conn->c_xmit_rdma_sent = 0;
 77 conn->c_xmit_data_sent = 0;
78
79 conn->c_map_queued = 0;
80
81 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
82 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
83
84 /* Mark messages as retransmissions, and move them to the send q */
85 spin_lock_irqsave(&conn->c_lock, flags);
86 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
87 set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
88 set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
89 }
90 list_splice_init(&conn->c_retrans, &conn->c_send_queue);
91 spin_unlock_irqrestore(&conn->c_lock, flags);
92}
93
94/*
 95 * We're making the conscious trade-off here to only send one message
96 * down the connection at a time.
97 * Pro:
98 * - tx queueing is a simple fifo list
99 * - reassembly is optional and easily done by transports per conn
100 * - no per flow rx lookup at all, straight to the socket
101 * - less per-frag memory and wire overhead
102 * Con:
103 * - queued acks can be delayed behind large messages
104 * Depends:
105 * - small message latency is higher behind queued large messages
106 * - large message latency isn't starved by intervening small sends
107 */
108int rds_send_xmit(struct rds_connection *conn)
109{
110 struct rds_message *rm;
111 unsigned long flags;
112 unsigned int tmp;
113 unsigned int send_quota = send_batch_count;
114 struct scatterlist *sg;
115 int ret = 0;
116 int was_empty = 0;
117 LIST_HEAD(to_be_dropped);
118
119 /*
120 * sendmsg calls here after having queued its message on the send
121 * queue. We only have one task feeding the connection at a time. If
122 * another thread is already feeding the queue then we back off. This
123 * avoids blocking the caller and trading per-connection data between
124 * caches per message.
125 *
126 * The sem holder will issue a retry if they notice that someone queued
127 * a message after they stopped walking the send queue but before they
128 * dropped the sem.
129 */
130 if (!mutex_trylock(&conn->c_send_lock)) {
131 rds_stats_inc(s_send_sem_contention);
132 ret = -ENOMEM;
133 goto out;
134 }
135
136 if (conn->c_trans->xmit_prepare)
137 conn->c_trans->xmit_prepare(conn);
138
139 /*
140 * spin trying to push headers and data down the connection until
 141 * the connection doesn't make forward progress.
 142 */
 143 while (--send_quota) {
 144
 145 rm = conn->c_xmit_rm;
 146
147 /*
148 * If between sending messages, we can send a pending congestion
149 * map update.
150 *
151 * Transports either define a special xmit_cong_map function,
152 * or we allocate a cong_map message and treat it just like any
153 * other send.
 154 */
 155 if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
 156 if (conn->c_trans->xmit_cong_map) {
 157 unsigned long map_offset = 0;
 158 unsigned long map_bytes = sizeof(struct rds_header) +
 159 RDS_CONG_MAP_BYTES;
 160
161 while (map_bytes) {
162 ret = conn->c_trans->xmit_cong_map(conn, conn->c_lcong,
163 map_offset);
164 if (ret <= 0) {
165 /* too far down the rabbithole! */
166 mutex_unlock(&conn->c_send_lock);
167 rds_conn_error(conn, "Cong map xmit failed\n");
168 goto out;
169 }
170
171 map_offset += ret;
172 map_bytes -= ret;
173 }
174 } else {
175 /* send cong update like a normal rm */
176 rm = rds_cong_update_alloc(conn);
177 if (IS_ERR(rm)) {
178 ret = PTR_ERR(rm);
179 break;
180 }
181 rm->data.op_active = 1;
 182
 183 conn->c_xmit_rm = rm;
 184 }
185 }
186
187 /*
 188 * If not already working on one, grab the next message.
189 *
190 * c_xmit_rm holds a ref while we're sending this message down
 191 * the connection. We can use this ref while holding the
192 * send_sem.. rds_send_reset() is serialized with it.
193 */
 194 if (!rm) {
195 unsigned int len;
196
197 spin_lock_irqsave(&conn->c_lock, flags);
198
199 if (!list_empty(&conn->c_send_queue)) {
200 rm = list_entry(conn->c_send_queue.next,
201 struct rds_message,
202 m_conn_item);
203 rds_message_addref(rm);
204
205 /*
206 * Move the message from the send queue to the retransmit
207 * list right away.
208 */
209 list_move_tail(&rm->m_conn_item, &conn->c_retrans);
210 }
211
212 spin_unlock_irqrestore(&conn->c_lock, flags);
213
 214 if (!rm) {
215 was_empty = 1;
216 break;
217 }
218
219 /* Unfortunately, the way Infiniband deals with
220 * RDMA to a bad MR key is by moving the entire
 221 * queue pair to error state. We could possibly
222 * recover from that, but right now we drop the
223 * connection.
224 * Therefore, we never retransmit messages with RDMA ops.
225 */
 226 if (rm->rdma.op_active &&
 227 test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
228 spin_lock_irqsave(&conn->c_lock, flags);
229 if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
230 list_move(&rm->m_conn_item, &to_be_dropped);
231 spin_unlock_irqrestore(&conn->c_lock, flags);
232 rds_message_put(rm);
233 continue;
234 }
235
236 /* Require an ACK every once in a while */
237 len = ntohl(rm->m_inc.i_hdr.h_len);
238 if (conn->c_unacked_packets == 0 ||
239 conn->c_unacked_bytes < len) {
240 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
241
242 conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
243 conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
244 rds_stats_inc(s_send_ack_required);
245 } else {
246 conn->c_unacked_bytes -= len;
247 conn->c_unacked_packets--;
248 }
249
250 conn->c_xmit_rm = rm;
251 }
252
253 /* The transport either sends the whole rdma or none of it */
254 if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
255 ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
256 if (ret)
257 break;
258 conn->c_xmit_rdma_sent = 1;
259
260 /* The transport owns the mapped memory for now.
261 * You can't unmap it while it's on the send queue */
262 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
263 }
264
265 if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
266 ret = conn->c_trans->xmit_atomic(conn, rm);
267 if (ret)
268 break;
 269 conn->c_xmit_atomic_sent = 1;
270 /* The transport owns the mapped memory for now.
271 * You can't unmap it while it's on the send queue */
272 set_bit(RDS_MSG_MAPPED, &rm->m_flags);
273 }
274
275 /*
276 * A number of cases require an RDS header to be sent
277 * even if there is no data.
278 * We permit 0-byte sends; rds-ping depends on this.
279 * However, if there are exclusively attached silent ops,
280 * we skip the hdr/data send, to enable silent operation.
281 */
282 if (rm->data.op_nents == 0) {
283 int ops_present;
284 int all_ops_are_silent = 1;
285
286 ops_present = (rm->atomic.op_active || rm->rdma.op_active);
287 if (rm->atomic.op_active && !rm->atomic.op_silent)
288 all_ops_are_silent = 0;
289 if (rm->rdma.op_active && !rm->rdma.op_silent)
290 all_ops_are_silent = 0;
291
292 if (ops_present && all_ops_are_silent
293 && !rm->m_rdma_cookie)
294 rm->data.op_active = 0;
295 }
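 /* Put differently (illustrative summary): a message whose only active ops
  * are silent RDMA/atomic ops generates no header or data on the wire; but if
  * an rdma cookie has to reach the peer (RDS_CMSG_RDMA_DEST/RDMA_MAP set
  * m_rdma_cookie), the header is still sent so the cookie can be carried. */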
296
 297 if (rm->data.op_active && !conn->c_xmit_data_sent) {
298 ret = conn->c_trans->xmit(conn, rm,
299 conn->c_xmit_hdr_off,
300 conn->c_xmit_sg,
301 conn->c_xmit_data_off);
302 if (ret <= 0)
303 break;
304
305 if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
306 tmp = min_t(int, ret,
307 sizeof(struct rds_header) -
308 conn->c_xmit_hdr_off);
309 conn->c_xmit_hdr_off += tmp;
310 ret -= tmp;
311 }
312
 313 sg = &rm->data.op_sg[conn->c_xmit_sg];
314 while (ret) {
315 tmp = min_t(int, ret, sg->length -
316 conn->c_xmit_data_off);
317 conn->c_xmit_data_off += tmp;
318 ret -= tmp;
319 if (conn->c_xmit_data_off == sg->length) {
320 conn->c_xmit_data_off = 0;
321 sg++;
322 conn->c_xmit_sg++;
323 BUG_ON(ret != 0 &&
 324 conn->c_xmit_sg == rm->data.op_nents);
325 }
326 }
327
328 if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
329 (conn->c_xmit_sg == rm->data.op_nents))
330 conn->c_xmit_data_sent = 1;
331 }
332
333 /*
334 * A rm will only take multiple times through this loop
335 * if there is a data op. Thus, if the data is sent (or there was
336 * none), then we're done with the rm.
337 */
338 if (!rm->data.op_active || conn->c_xmit_data_sent) {
339 conn->c_xmit_rm = NULL;
340 conn->c_xmit_sg = 0;
341 conn->c_xmit_hdr_off = 0;
342 conn->c_xmit_data_off = 0;
343 conn->c_xmit_rdma_sent = 0;
344 conn->c_xmit_atomic_sent = 0;
345 conn->c_xmit_data_sent = 0;
346
347 rds_message_put(rm);
348 }
349 }
350
351 /* Nuke any messages we decided not to retransmit. */
352 if (!list_empty(&to_be_dropped))
353 rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);
354
355 if (conn->c_trans->xmit_complete)
356 conn->c_trans->xmit_complete(conn);
357
358 /*
359 * We might be racing with another sender who queued a message but
360 * backed off on noticing that we held the c_send_lock. If we check
361 * for queued messages after dropping the sem then either we'll
362 * see the queued message or the queuer will get the sem. If we
363 * notice the queued message then we trigger an immediate retry.
364 *
365 * We need to be careful only to do this when we stopped processing
366 * the send queue because it was empty. It's the only way we
367 * stop processing the loop when the transport hasn't taken
368 * responsibility for forward progress.
369 */
370 mutex_unlock(&conn->c_send_lock);
371
 372 if (send_quota == 0 && !was_empty) {
373 /* We exhausted the send quota, but there's work left to
374 * do. Return and (re-)schedule the send worker.
375 */
376 ret = -EAGAIN;
377 }
378
379 if (ret == 0 && was_empty) {
380 /* A simple bit test would be way faster than taking the
381 * spin lock */
382 spin_lock_irqsave(&conn->c_lock, flags);
383 if (!list_empty(&conn->c_send_queue)) {
384 rds_stats_inc(s_send_sem_queue_raced);
385 ret = -EAGAIN;
386 }
387 spin_unlock_irqrestore(&conn->c_lock, flags);
388 }
389out:
390 return ret;
391}
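/*
 * Sketch of the typical caller (loosely modeled on net/rds/threads.c, not a
 * verbatim copy): the send worker drives rds_send_xmit() and simply
 * reschedules itself when the return value says there is more work, so a busy
 * connection keeps making progress without hogging the CPU.
 */
#if 0	/* illustrative only */
static void example_send_worker(struct work_struct *work)
{
	struct rds_connection *conn = container_of(work, struct rds_connection,
						   c_send_w.work);
	int ret = rds_send_xmit(conn);

	if (ret == -EAGAIN)		/* batch quota exhausted, retry at once */
		queue_delayed_work(rds_wq, &conn->c_send_w, 0);
	else if (ret == -ENOMEM)	/* lost the c_send_lock race, back off */
		queue_delayed_work(rds_wq, &conn->c_send_w, 2);
}
#endif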
392
393static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
394{
395 u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
396
397 assert_spin_locked(&rs->rs_lock);
398
399 BUG_ON(rs->rs_snd_bytes < len);
400 rs->rs_snd_bytes -= len;
401
402 if (rs->rs_snd_bytes == 0)
403 rds_stats_inc(s_send_queue_empty);
404}
405
406static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
407 is_acked_func is_acked)
408{
409 if (is_acked)
410 return is_acked(rm, ack);
411 return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
412}
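/*
 * Example of a transport-supplied is_acked_func (a sketch in the spirit of
 * the TCP transport, not the exact net/rds/tcp_send.c code): a stream
 * transport that assigns its own ack sequence numbers compares against
 * rm->m_ack_seq rather than the RDS header sequence, and must honour
 * RDS_MSG_HAS_ACK_SEQ before trusting that field.
 */
#if 0	/* illustrative only */
static int example_stream_is_acked(struct rds_message *rm, u64 ack)
{
	if (!test_bit(RDS_MSG_HAS_ACK_SEQ, &rm->m_flags))
		return 0;
	return (__s32)((u32)rm->m_ack_seq - (u32)ack) <= 0;
}
#endif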
413
414/*
415 * Returns true if there are no messages on the send and retransmit queues
416 * which have a sequence number greater than or equal to the given sequence
417 * number.
418 */
419int rds_send_acked_before(struct rds_connection *conn, u64 seq)
420{
421 struct rds_message *rm, *tmp;
422 int ret = 1;
423
424 spin_lock(&conn->c_lock);
425
426 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
427 if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
428 ret = 0;
429 break;
430 }
431
432 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
433 if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
434 ret = 0;
435 break;
436 }
437
438 spin_unlock(&conn->c_lock);
439
440 return ret;
441}
442
443/*
444 * This is pretty similar to what happens below in the ACK
445 * handling code - except that we call here as soon as we get
446 * the IB send completion on the RDMA op and the accompanying
447 * message.
448 */
449void rds_rdma_send_complete(struct rds_message *rm, int status)
450{
451 struct rds_sock *rs = NULL;
 452 struct rm_rdma_op *ro;
 453 struct rds_notifier *notifier;
 454 unsigned long flags;
 455
 456 spin_lock_irqsave(&rm->m_rs_lock, flags);
 457
 458 ro = &rm->rdma;
 459 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
460 ro->op_active && ro->op_notify && ro->op_notifier) {
461 notifier = ro->op_notifier;
462 rs = rm->m_rs;
463 sock_hold(rds_rs_to_sk(rs));
464
465 notifier->n_status = status;
466 spin_lock(&rs->rs_lock);
467 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
468 spin_unlock(&rs->rs_lock);
469
 470 ro->op_notifier = NULL;
471 }
472
 473 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
474
475 if (rs) {
476 rds_wake_sk_sleep(rs);
477 sock_put(rds_rs_to_sk(rs));
478 }
479}
 480EXPORT_SYMBOL_GPL(rds_rdma_send_complete);
 481
482/*
483 * Just like above, except looks at atomic op
484 */
485void rds_atomic_send_complete(struct rds_message *rm, int status)
486{
487 struct rds_sock *rs = NULL;
488 struct rm_atomic_op *ao;
489 struct rds_notifier *notifier;
490
491 spin_lock(&rm->m_rs_lock);
492
493 ao = &rm->atomic;
494 if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags)
495 && ao->op_active && ao->op_notify && ao->op_notifier) {
496 notifier = ao->op_notifier;
497 rs = rm->m_rs;
498 sock_hold(rds_rs_to_sk(rs));
499
500 notifier->n_status = status;
501 spin_lock(&rs->rs_lock);
502 list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
503 spin_unlock(&rs->rs_lock);
504
505 ao->op_notifier = NULL;
506 }
507
508 spin_unlock(&rm->m_rs_lock);
509
510 if (rs) {
511 rds_wake_sk_sleep(rs);
512 sock_put(rds_rs_to_sk(rs));
513 }
514}
515EXPORT_SYMBOL_GPL(rds_atomic_send_complete);
516
517/*
518 * This is the same as rds_rdma_send_complete except we
519 * don't do any locking - we have all the ingredients (message,
520 * socket, socket lock) and can just move the notifier.
521 */
522static inline void
 523__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
 524{
 525 struct rm_rdma_op *ro;
 526 struct rm_atomic_op *ao;
 527
528 ro = &rm->rdma;
529 if (ro->op_active && ro->op_notify && ro->op_notifier) {
530 ro->op_notifier->n_status = status;
531 list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
532 ro->op_notifier = NULL;
533 }
534
535 ao = &rm->atomic;
536 if (ao->op_active && ao->op_notify && ao->op_notifier) {
537 ao->op_notifier->n_status = status;
538 list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
539 ao->op_notifier = NULL;
540 }
541
542 /* No need to wake the app - caller does this */
543}
544
545/*
546 * This is called from the IB send completion when we detect
547 * a RDMA operation that failed with remote access error.
548 * So speed is not an issue here.
549 */
550struct rds_message *rds_send_get_message(struct rds_connection *conn,
 551 struct rm_rdma_op *op)
552{
553 struct rds_message *rm, *tmp, *found = NULL;
554 unsigned long flags;
555
556 spin_lock_irqsave(&conn->c_lock, flags);
557
558 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
 559 if (&rm->rdma == op) {
560 atomic_inc(&rm->m_refcount);
561 found = rm;
562 goto out;
563 }
564 }
565
566 list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
 567 if (&rm->rdma == op) {
568 atomic_inc(&rm->m_refcount);
569 found = rm;
570 break;
571 }
572 }
573
574out:
575 spin_unlock_irqrestore(&conn->c_lock, flags);
576
577 return found;
578}
 579EXPORT_SYMBOL_GPL(rds_send_get_message);
580
581/*
582 * This removes messages from the socket's list if they're on it. The list
583 * argument must be private to the caller, we must be able to modify it
584 * without locks. The messages must have a reference held for their
585 * position on the list. This function will drop that reference after
586 * removing the messages from the 'messages' list regardless of if it found
587 * the messages on the socket list or not.
588 */
589void rds_send_remove_from_sock(struct list_head *messages, int status)
590{
 591 unsigned long flags;
592 struct rds_sock *rs = NULL;
593 struct rds_message *rm;
594
 595 while (!list_empty(messages)) {
596 int was_on_sock = 0;
597
598 rm = list_entry(messages->next, struct rds_message,
599 m_conn_item);
600 list_del_init(&rm->m_conn_item);
601
602 /*
603 * If we see this flag cleared then we're *sure* that someone
604 * else beat us to removing it from the sock. If we race
605 * with their flag update we'll get the lock and then really
606 * see that the flag has been cleared.
607 *
608 * The message spinlock makes sure nobody clears rm->m_rs
609 * while we're messing with it. It does not prevent the
610 * message from being removed from the socket, though.
611 */
 612 spin_lock_irqsave(&rm->m_rs_lock, flags);
613 if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
614 goto unlock_and_drop;
615
616 if (rs != rm->m_rs) {
617 if (rs) {
618 rds_wake_sk_sleep(rs);
619 sock_put(rds_rs_to_sk(rs));
620 }
621 rs = rm->m_rs;
622 sock_hold(rds_rs_to_sk(rs));
623 }
 624 spin_lock(&rs->rs_lock);
625
626 if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
 627 struct rm_rdma_op *ro = &rm->rdma;
628 struct rds_notifier *notifier;
629
630 list_del_init(&rm->m_sock_item);
631 rds_send_sndbuf_remove(rs, rm);
632
633 if (ro->op_active && ro->op_notifier &&
634 (ro->op_notify || (ro->op_recverr && status))) {
635 notifier = ro->op_notifier;
636 list_add_tail(&notifier->n_list,
637 &rs->rs_notify_queue);
638 if (!notifier->n_status)
639 notifier->n_status = status;
 640 rm->rdma.op_notifier = NULL;
 641 }
 642 was_on_sock = 1;
643 rm->m_rs = NULL;
644 }
 645 spin_unlock(&rs->rs_lock);
646
647unlock_and_drop:
 648 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 649 rds_message_put(rm);
650 if (was_on_sock)
651 rds_message_put(rm);
652 }
653
654 if (rs) {
655 rds_wake_sk_sleep(rs);
656 sock_put(rds_rs_to_sk(rs));
657 }
658}
659
660/*
661 * Transports call here when they've determined that the receiver queued
662 * messages up to, and including, the given sequence number. Messages are
663 * moved to the retrans queue when rds_send_xmit picks them off the send
664 * queue. This means that in the TCP case, the message may not have been
665 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
666 * checks the RDS_MSG_HAS_ACK_SEQ bit.
667 *
668 * XXX It's not clear to me how this is safely serialized with socket
669 * destruction. Maybe it should bail if it sees SOCK_DEAD.
670 */
671void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
672 is_acked_func is_acked)
673{
674 struct rds_message *rm, *tmp;
675 unsigned long flags;
676 LIST_HEAD(list);
677
678 spin_lock_irqsave(&conn->c_lock, flags);
679
680 list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
681 if (!rds_send_is_acked(rm, ack, is_acked))
682 break;
683
684 list_move(&rm->m_conn_item, &list);
685 clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
686 }
687
688 /* order flag updates with spin locks */
689 if (!list_empty(&list))
690 smp_mb__after_clear_bit();
691
692 spin_unlock_irqrestore(&conn->c_lock, flags);
693
694 /* now remove the messages from the sock list as needed */
695 rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
696}
 697EXPORT_SYMBOL_GPL(rds_send_drop_acked);
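/* Typical callers (illustrative): a transport invokes this from its ack
 * processing path once it knows the peer has queued the data - e.g. an
 * is_acked callback like the sketch above lets a stream transport map its own
 * byte-level acknowledgements onto queued messages, while passing NULL falls
 * back to the RDS header sequence comparison in rds_send_is_acked(). */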
698
699void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
700{
701 struct rds_message *rm, *tmp;
702 struct rds_connection *conn;
 703 unsigned long flags;
 704 LIST_HEAD(list);
705
706 /* get all the messages we're dropping under the rs lock */
707 spin_lock_irqsave(&rs->rs_lock, flags);
708
709 list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
710 if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
711 dest->sin_port != rm->m_inc.i_hdr.h_dport))
712 continue;
713
714 list_move(&rm->m_sock_item, &list);
715 rds_send_sndbuf_remove(rs, rm);
716 clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
717 }
718
719 /* order flag updates with the rs lock */
 720 smp_mb__after_clear_bit();
721
722 spin_unlock_irqrestore(&rs->rs_lock, flags);
723
724 if (list_empty(&list))
725 return;
 726
 727 /* Remove the messages from the conn */
 728 list_for_each_entry(rm, &list, m_sock_item) {
729
730 conn = rm->m_inc.i_conn;
 731
 732 spin_lock_irqsave(&conn->c_lock, flags);
 733 /*
734 * Maybe someone else beat us to removing rm from the conn.
735 * If we race with their flag update we'll get the lock and
736 * then really see that the flag has been cleared.
 737 */
738 if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
739 spin_unlock_irqrestore(&conn->c_lock, flags);
 740 continue;
 741 }
742 list_del_init(&rm->m_conn_item);
743 spin_unlock_irqrestore(&conn->c_lock, flags);
 744
745 /*
746 * Couldn't grab m_rs_lock in top loop (lock ordering),
747 * but we can now.
748 */
 749 spin_lock_irqsave(&rm->m_rs_lock, flags);
 750
 751 spin_lock(&rs->rs_lock);
 752 __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
753 spin_unlock(&rs->rs_lock);
754
755 rm->m_rs = NULL;
 756 spin_unlock_irqrestore(&rm->m_rs_lock, flags);
 757
 758 rds_message_put(rm);
 759 }
 760
 761 rds_wake_sk_sleep(rs);
 762
763 while (!list_empty(&list)) {
764 rm = list_entry(list.next, struct rds_message, m_sock_item);
765 list_del_init(&rm->m_sock_item);
766
767 rds_message_wait(rm);
768 rds_message_put(rm);
769 }
770}
771
772/*
 773 * we only want this to fire once so we use the caller's 'queued'. It's
774 * possible that another thread can race with us and remove the
775 * message from the flow with RDS_CANCEL_SENT_TO.
776 */
777static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
778 struct rds_message *rm, __be16 sport,
779 __be16 dport, int *queued)
780{
781 unsigned long flags;
782 u32 len;
783
784 if (*queued)
785 goto out;
786
787 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);
788
789 /* this is the only place which holds both the socket's rs_lock
790 * and the connection's c_lock */
791 spin_lock_irqsave(&rs->rs_lock, flags);
792
 793 /*
 794 * If we only queued a message when the whole thing fit in sndbuf, then
 795 * with just a little space left userspace would get -EAGAIN even though
 796 * poll() indicates there's send room, which can lead to bad behavior
 797 * (spinning) if snd_bytes isn't freed up by incoming acks. So we check
 798 * the *old* value of rs_snd_bytes here to allow the last msg to exceed
 799 * the buffer, and poll() then knows no more data can be sent.
 800 */
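 /* Worked example (illustrative numbers): with rds_sk_sndbuf() == 64KB and
  * rs_snd_bytes == 60KB, a 32KB message is still queued because the *old*
  * value is below the limit; rs_snd_bytes then reaches 92KB and no further
  * message is queued until incoming acks shrink it below 64KB again. */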
801 if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
802 rs->rs_snd_bytes += len;
803
804 /* let recv side know we are close to send space exhaustion.
805 * This is probably not the optimal way to do it, as this
806 * means we set the flag on *all* messages as soon as our
807 * throughput hits a certain threshold.
808 */
809 if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
810 __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
811
812 list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
813 set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
814 rds_message_addref(rm);
815 rm->m_rs = rs;
816
817 /* The code ordering is a little weird, but we're
818 trying to minimize the time we hold c_lock */
819 rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
820 rm->m_inc.i_conn = conn;
821 rds_message_addref(rm);
822
823 spin_lock(&conn->c_lock);
824 rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
825 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
826 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
827 spin_unlock(&conn->c_lock);
828
829 rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
830 rm, len, rs, rs->rs_snd_bytes,
831 (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));
832
833 *queued = 1;
834 }
835
836 spin_unlock_irqrestore(&rs->rs_lock, flags);
837out:
838 return *queued;
839}
840
841/*
842 * rds_message is getting to be quite complicated, and we'd like to allocate
843 * it all in one go. This figures out how big it needs to be up front.
844 */
845static int rds_rm_size(struct msghdr *msg, int data_len)
846{
 847 struct cmsghdr *cmsg;
 848 int size = 0;
849 int retval;
850
851 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
852 if (!CMSG_OK(msg, cmsg))
853 return -EINVAL;
854
855 if (cmsg->cmsg_level != SOL_RDS)
856 continue;
857
858 switch (cmsg->cmsg_type) {
859 case RDS_CMSG_RDMA_ARGS:
860 retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
861 if (retval < 0)
862 return retval;
863 size += retval;
864 break;
865
866 case RDS_CMSG_RDMA_DEST:
867 case RDS_CMSG_RDMA_MAP:
 868 /* these are valid but do not add any size */
869 break;
870
871 case RDS_CMSG_ATOMIC_CSWP:
872 case RDS_CMSG_ATOMIC_FADD:
873 size += sizeof(struct scatterlist);
874 break;
875
876 default:
877 return -EINVAL;
878 }
879
880 }
 881
 882 size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);
883
884 return size;
885}
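/* Sizing example (illustrative): on a 4KB-page system a 10000 byte payload
 * needs ceil(10000, 4096) == 3 data scatterlist entries; each attached atomic
 * op adds one more, and RDS_CMSG_RDMA_ARGS adds whatever
 * rds_rdma_extra_size() computes for the RDMA description. */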
886
887static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
888 struct msghdr *msg, int *allocated_mr)
889{
890 struct cmsghdr *cmsg;
891 int ret = 0;
892
893 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
894 if (!CMSG_OK(msg, cmsg))
895 return -EINVAL;
896
897 if (cmsg->cmsg_level != SOL_RDS)
898 continue;
899
900 /* As a side effect, RDMA_DEST and RDMA_MAP will set
 901 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
902 */
903 switch (cmsg->cmsg_type) {
904 case RDS_CMSG_RDMA_ARGS:
905 ret = rds_cmsg_rdma_args(rs, rm, cmsg);
906 break;
907
908 case RDS_CMSG_RDMA_DEST:
909 ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
910 break;
911
912 case RDS_CMSG_RDMA_MAP:
913 ret = rds_cmsg_rdma_map(rs, rm, cmsg);
914 if (!ret)
915 *allocated_mr = 1;
916 break;
917 case RDS_CMSG_ATOMIC_CSWP:
918 case RDS_CMSG_ATOMIC_FADD:
919 ret = rds_cmsg_atomic(rs, rm, cmsg);
920 break;
921
922 default:
923 return -EINVAL;
924 }
925
926 if (ret)
927 break;
928 }
929
930 return ret;
931}
932
933int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
934 size_t payload_len)
935{
936 struct sock *sk = sock->sk;
937 struct rds_sock *rs = rds_sk_to_rs(sk);
938 struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
939 __be32 daddr;
940 __be16 dport;
941 struct rds_message *rm = NULL;
942 struct rds_connection *conn;
943 int ret = 0;
944 int queued = 0, allocated_mr = 0;
945 int nonblock = msg->msg_flags & MSG_DONTWAIT;
 946 long timeo = sock_sndtimeo(sk, nonblock);
947
 948 /* Mirror Linux UDP's mirroring of BSD error message compatibility */
949 /* XXX: Perhaps MSG_MORE someday */
950 if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
951 printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
952 ret = -EOPNOTSUPP;
953 goto out;
954 }
955
956 if (msg->msg_namelen) {
957 /* XXX fail non-unicast destination IPs? */
958 if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
959 ret = -EINVAL;
960 goto out;
961 }
962 daddr = usin->sin_addr.s_addr;
963 dport = usin->sin_port;
964 } else {
965 /* We only care about consistency with ->connect() */
966 lock_sock(sk);
967 daddr = rs->rs_conn_addr;
968 dport = rs->rs_conn_port;
969 release_sock(sk);
970 }
971
972 /* racing with another thread binding seems ok here */
973 if (daddr == 0 || rs->rs_bound_addr == 0) {
974 ret = -ENOTCONN; /* XXX not a great errno */
975 goto out;
976 }
977
978 /* size of rm including all sgs */
979 ret = rds_rm_size(msg, payload_len);
980 if (ret < 0)
981 goto out;
982
983 rm = rds_message_alloc(ret, GFP_KERNEL);
984 if (!rm) {
985 ret = -ENOMEM;
986 goto out;
987 }
988
989 /* Attach data to the rm */
990 if (payload_len) {
991 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
992 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
993 if (ret)
994 goto out;
995 }
996 rm->data.op_active = 1;
 997
998 rm->m_daddr = daddr;
999
1000 /* rds_conn_create has a spinlock that runs with IRQ off.
1001 * Caching the conn in the socket helps a lot. */
1002 if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
1003 conn = rs->rs_conn;
1004 else {
1005 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
1006 rs->rs_transport,
1007 sock->sk->sk_allocation);
1008 if (IS_ERR(conn)) {
1009 ret = PTR_ERR(conn);
1010 goto out;
1011 }
1012 rs->rs_conn = conn;
1013 }
1014
1015 /* Parse any control messages the user may have included. */
1016 ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1017 if (ret)
1018 goto out;
1019
 1020 if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1021 if (printk_ratelimit())
1022 printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
 1023 &rm->rdma, conn->c_trans->xmit_rdma);
1024 ret = -EOPNOTSUPP;
1025 goto out;
1026 }
1027
1028 if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1029 if (printk_ratelimit())
1030 printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1031 &rm->atomic, conn->c_trans->xmit_atomic);
1032 ret = -EOPNOTSUPP;
1033 goto out;
1034 }
1035
1036 /* If the connection is down, trigger a connect. We may
1037 * have scheduled a delayed reconnect however - in this case
1038 * we should not interfere.
1039 */
1040 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
1041 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
1042 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
1043
1044 ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1045 if (ret) {
1046 rs->rs_seen_congestion = 1;
 1047 goto out;
 1048 }
1049
1050 while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1051 dport, &queued)) {
1052 rds_stats_inc(s_send_queue_full);
1053 /* XXX make sure this is reasonable */
1054 if (payload_len > rds_sk_sndbuf(rs)) {
1055 ret = -EMSGSIZE;
1056 goto out;
1057 }
1058 if (nonblock) {
1059 ret = -EAGAIN;
1060 goto out;
1061 }
1062
 1063 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1064 rds_send_queue_rm(rs, conn, rm,
1065 rs->rs_bound_port,
1066 dport,
1067 &queued),
1068 timeo);
1069 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1070 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1071 continue;
1072
1073 ret = timeo;
1074 if (ret == 0)
1075 ret = -ETIMEDOUT;
1076 goto out;
1077 }
1078
1079 /*
1080 * By now we've committed to the send. We reuse rds_send_worker()
1081 * to retry sends in the rds thread if the transport asks us to.
1082 */
1083 rds_stats_inc(s_send_queued);
1084
1085 if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1086 rds_send_worker(&conn->c_send_w.work);
1087
1088 rds_message_put(rm);
1089 return payload_len;
1090
1091out:
1092 /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
1093 * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1094 * or in any other way, we need to destroy the MR again */
1095 if (allocated_mr)
1096 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1097
1098 if (rm)
1099 rds_message_put(rm);
1100 return ret;
1101}
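/*
 * Userspace view (illustrative sketch, not part of this file): RDS sockets
 * are PF_RDS/SOCK_SEQPACKET sockets bound to an IPv4 address and 16-bit port;
 * a plain sendmsg()/sendto() with a sockaddr_in destination ends up in
 * rds_sendmsg() above, and SOL_RDS control messages attach RDMA/atomic
 * operations to the datagram.
 */
#if 0	/* illustrative userspace code; assumes PF_RDS is visible via <linux/rds.h> or libc headers */
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <string.h>

static int rds_send_example(const char *laddr, const char *daddr)
{
	int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
	struct sockaddr_in lsin, dsin;
	char payload[] = "hello";

	memset(&lsin, 0, sizeof(lsin));
	lsin.sin_family = AF_INET;
	lsin.sin_addr.s_addr = inet_addr(laddr);
	lsin.sin_port = htons(4000);
	if (fd < 0 || bind(fd, (struct sockaddr *)&lsin, sizeof(lsin)) < 0)
		return -1;

	memset(&dsin, 0, sizeof(dsin));
	dsin.sin_family = AF_INET;
	dsin.sin_addr.s_addr = inet_addr(daddr);
	dsin.sin_port = htons(4001);

	/* lands in rds_sendmsg() */
	return sendto(fd, payload, sizeof(payload), 0,
		      (struct sockaddr *)&dsin, sizeof(dsin));
}
#endif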
1102
1103/*
1104 * Reply to a ping packet.
1105 */
1106int
1107rds_send_pong(struct rds_connection *conn, __be16 dport)
1108{
1109 struct rds_message *rm;
1110 unsigned long flags;
1111 int ret = 0;
1112
1113 rm = rds_message_alloc(0, GFP_ATOMIC);
 1114 if (!rm) {
1115 ret = -ENOMEM;
1116 goto out;
1117 }
1118
1119 rm->m_daddr = conn->c_faddr;
1120
1121 /* If the connection is down, trigger a connect. We may
1122 * have scheduled a delayed reconnect however - in this case
1123 * we should not interfere.
1124 */
1125 if (rds_conn_state(conn) == RDS_CONN_DOWN &&
1126 !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
1127 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
1128
1129 ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
1130 if (ret)
1131 goto out;
1132
1133 spin_lock_irqsave(&conn->c_lock, flags);
1134 list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
1135 set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
1136 rds_message_addref(rm);
1137 rm->m_inc.i_conn = conn;
1138
1139 rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
1140 conn->c_next_tx_seq);
1141 conn->c_next_tx_seq++;
1142 spin_unlock_irqrestore(&conn->c_lock, flags);
1143
1144 rds_stats_inc(s_send_queued);
1145 rds_stats_inc(s_send_pong);
1146
1147 queue_delayed_work(rds_wq, &conn->c_send_w, 0);
1148 rds_message_put(rm);
1149 return 0;
1150
1151out:
1152 if (rm)
1153 rds_message_put(rm);
1154 return ret;
1155}
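/*
 * Context (illustrative summary): an RDS "ping" is a zero-length message sent
 * to port 0, which is why 0-byte sends are permitted in rds_send_xmit(); when
 * such a message arrives, the receive path calls rds_send_pong() to queue
 * this zero-length reply from port 0 back to the sender's source port.
 */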