/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <net/sock.h>
#include <linux/in.h>
#include <linux/list.h>

#include "rds.h"

/* When transmitting messages in rds_send_xmit, we need to emerge from
 * time to time and briefly release the CPU. Otherwise the soft lockup
 * watchdog will kick our shin.
 * Also, it seems fairer to not let one busy connection stall all the
 * others.
 *
 * send_batch_count is the number of times we'll loop in send_xmit. Setting
 * it to 0 will restore the old behavior (where we looped until we had
 * drained the queue).
 */
static int send_batch_count = 64;
module_param(send_batch_count, int, 0444);
MODULE_PARM_DESC(send_batch_count, " batch factor when working the send queue");
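
/*
 * Tuning note (illustrative, not taken from this file): the 0444
 * permissions make send_batch_count read-only at runtime, so it is set
 * at module load time, e.g. "modprobe rds send_batch_count=128", or
 * with "rds.send_batch_count=128" on the kernel command line when RDS
 * is built in.
 */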

/*
 * Reset the send state. Caller must hold c_send_lock when calling here.
 */
void rds_send_reset(struct rds_connection *conn)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;

        if (conn->c_xmit_rm) {
                /* Tell the user the RDMA op is no longer mapped by the
                 * transport. This isn't entirely true (it's flushed out
                 * independently) but as the connection is down, there's
                 * no ongoing RDMA to/from that memory */
                rds_message_unmapped(conn->c_xmit_rm);
                rds_message_put(conn->c_xmit_rm);
                conn->c_xmit_rm = NULL;
        }
        conn->c_xmit_sg = 0;
        conn->c_xmit_hdr_off = 0;
        conn->c_xmit_data_off = 0;
        conn->c_xmit_atomic_sent = 0;
        conn->c_xmit_rdma_sent = 0;
        conn->c_xmit_data_sent = 0;

        conn->c_map_queued = 0;

        conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
        conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;

        /* Mark messages as retransmissions, and move them to the send q */
        spin_lock_irqsave(&conn->c_lock, flags);
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);
                set_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags);
        }
        list_splice_init(&conn->c_retrans, &conn->c_send_queue);
        spin_unlock_irqrestore(&conn->c_lock, flags);
}

/*
 * We're making the conscious trade-off here to only send one message
 * down the connection at a time.
 *   Pro:
 *      - tx queueing is a simple fifo list
 *      - reassembly is optional and easily done by transports per conn
 *      - no per flow rx lookup at all, straight to the socket
 *      - less per-frag memory and wire overhead
 *   Con:
 *      - queued acks can be delayed behind large messages
 *   Depends:
 *      - small message latency is higher behind queued large messages
 *      - large message latency isn't starved by intervening small sends
 */
int rds_send_xmit(struct rds_connection *conn)
{
        struct rds_message *rm;
        unsigned long flags;
        unsigned int tmp;
        unsigned int send_quota = send_batch_count;
        struct scatterlist *sg;
        int ret = 0;
        int was_empty = 0;
        LIST_HEAD(to_be_dropped);

        if (!rds_conn_up(conn))
                goto out;

        /*
         * sendmsg calls here after having queued its message on the send
         * queue.  We only have one task feeding the connection at a time.  If
         * another thread is already feeding the queue then we back off.  This
         * avoids blocking the caller and trading per-connection data between
         * caches per message.
         */
        if (!spin_trylock_irqsave(&conn->c_send_lock, flags)) {
                rds_stats_inc(s_send_lock_contention);
                ret = -ENOMEM;
                goto out;
        }
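
        /*
         * Note: -ENOMEM above is a sentinel rather than a literal
         * allocation failure; rds_send_worker() treats -ENOMEM as
         * "requeue after a short delay" while -EAGAIN (used later in
         * this function) means requeue immediately.
         */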

        if (conn->c_trans->xmit_prepare)
                conn->c_trans->xmit_prepare(conn);

        /*
         * spin trying to push headers and data down the connection until
         * the connection doesn't make forward progress.
         */
        while (--send_quota) {

                rm = conn->c_xmit_rm;

                /*
                 * If between sending messages, we can send a pending congestion
                 * map update.
                 */
                if (!rm && test_and_clear_bit(0, &conn->c_map_queued)) {
                        rm = rds_cong_update_alloc(conn);
                        if (IS_ERR(rm)) {
                                ret = PTR_ERR(rm);
                                break;
                        }
                        rm->data.op_active = 1;

                        conn->c_xmit_rm = rm;
                }

                /*
                 * If not already working on one, grab the next message.
                 *
                 * c_xmit_rm holds a ref while we're sending this message down
                 * the connection.  We can use this ref while holding the
                 * c_send_lock; rds_send_reset() is serialized with it.
                 */
                if (!rm) {
                        unsigned int len;

                        /*
                         * c_send_lock was taken with irqsave above, so
                         * interrupts are already off here; take c_lock with
                         * a plain spin_lock so we don't clobber the flags
                         * saved for c_send_lock.
                         */
                        spin_lock(&conn->c_lock);

                        if (!list_empty(&conn->c_send_queue)) {
                                rm = list_entry(conn->c_send_queue.next,
                                                struct rds_message,
                                                m_conn_item);
                                rds_message_addref(rm);

                                /*
                                 * Move the message from the send queue to the retransmit
                                 * list right away.
                                 */
                                list_move_tail(&rm->m_conn_item, &conn->c_retrans);
                        }

                        spin_unlock(&conn->c_lock);

                        if (!rm) {
                                was_empty = 1;
                                break;
                        }

                        /* Unfortunately, the way InfiniBand deals with
                         * RDMA to a bad MR key is by moving the entire
                         * queue pair to error state. We could possibly
                         * recover from that, but right now we drop the
                         * connection.
                         * Therefore, we never retransmit messages with RDMA ops.
                         */
                        if (rm->rdma.op_active &&
                            test_bit(RDS_MSG_RETRANSMITTED, &rm->m_flags)) {
                                spin_lock(&conn->c_lock);
                                if (test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags))
                                        list_move(&rm->m_conn_item, &to_be_dropped);
                                spin_unlock(&conn->c_lock);
                                rds_message_put(rm);
                                continue;
                        }

                        /* Require an ACK every once in a while */
                        len = ntohl(rm->m_inc.i_hdr.h_len);
                        if (conn->c_unacked_packets == 0 ||
                            conn->c_unacked_bytes < len) {
                                __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                                conn->c_unacked_packets = rds_sysctl_max_unacked_packets;
                                conn->c_unacked_bytes = rds_sysctl_max_unacked_bytes;
                                rds_stats_inc(s_send_ack_required);
                        } else {
                                conn->c_unacked_bytes -= len;
                                conn->c_unacked_packets--;
                        }
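
                        /*
                         * Illustrative numbers (not the defaults): with
                         * max_unacked_packets = 8 and max_unacked_bytes = 64K,
                         * an ACK is requested at least once every 8 messages
                         * or 64K of payload, whichever is reached first, and
                         * the budget above is then reset.
                         */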

                        conn->c_xmit_rm = rm;
                }

                /* The transport either sends the whole rdma or none of it */
                if (rm->rdma.op_active && !conn->c_xmit_rdma_sent) {
                        rm->m_final_op = &rm->rdma;
                        ret = conn->c_trans->xmit_rdma(conn, &rm->rdma);
                        if (ret)
                                break;
                        conn->c_xmit_rdma_sent = 1;

                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                }

                if (rm->atomic.op_active && !conn->c_xmit_atomic_sent) {
                        rm->m_final_op = &rm->atomic;
                        ret = conn->c_trans->xmit_atomic(conn, &rm->atomic);
                        if (ret)
                                break;
                        conn->c_xmit_atomic_sent = 1;

                        /* The transport owns the mapped memory for now.
                         * You can't unmap it while it's on the send queue */
                        set_bit(RDS_MSG_MAPPED, &rm->m_flags);
                }

                /*
                 * A number of cases require an RDS header to be sent
                 * even if there is no data.
                 * We permit 0-byte sends; rds-ping depends on this.
                 * However, if there are exclusively attached silent ops,
                 * we skip the hdr/data send, to enable silent operation.
                 */
                if (rm->data.op_nents == 0) {
                        int ops_present;
                        int all_ops_are_silent = 1;

                        ops_present = (rm->atomic.op_active || rm->rdma.op_active);
                        if (rm->atomic.op_active && !rm->atomic.op_silent)
                                all_ops_are_silent = 0;
                        if (rm->rdma.op_active && !rm->rdma.op_silent)
                                all_ops_are_silent = 0;

                        if (ops_present && all_ops_are_silent
                            && !rm->m_rdma_cookie)
                                rm->data.op_active = 0;
                }

                if (rm->data.op_active && !conn->c_xmit_data_sent) {
                        rm->m_final_op = &rm->data;
                        ret = conn->c_trans->xmit(conn, rm,
                                                  conn->c_xmit_hdr_off,
                                                  conn->c_xmit_sg,
                                                  conn->c_xmit_data_off);
                        if (ret <= 0)
                                break;

                        /*
                         * ret is the number of bytes the transport accepted;
                         * charge them against the header first, then walk the
                         * data scatterlist.
                         */
                        if (conn->c_xmit_hdr_off < sizeof(struct rds_header)) {
                                tmp = min_t(int, ret,
                                            sizeof(struct rds_header) -
                                            conn->c_xmit_hdr_off);
                                conn->c_xmit_hdr_off += tmp;
                                ret -= tmp;
                        }

                        sg = &rm->data.op_sg[conn->c_xmit_sg];
                        while (ret) {
                                tmp = min_t(int, ret, sg->length -
                                                      conn->c_xmit_data_off);
                                conn->c_xmit_data_off += tmp;
                                ret -= tmp;
                                if (conn->c_xmit_data_off == sg->length) {
                                        conn->c_xmit_data_off = 0;
                                        sg++;
                                        conn->c_xmit_sg++;
                                        BUG_ON(ret != 0 &&
                                               conn->c_xmit_sg == rm->data.op_nents);
                                }
                        }

                        if (conn->c_xmit_hdr_off == sizeof(struct rds_header) &&
                            (conn->c_xmit_sg == rm->data.op_nents))
                                conn->c_xmit_data_sent = 1;
                }

                /*
                 * An rm will only take multiple passes through this loop
                 * if there is a data op. Thus, if the data is sent (or there was
                 * none), then we're done with the rm.
                 */
                if (!rm->data.op_active || conn->c_xmit_data_sent) {
                        conn->c_xmit_rm = NULL;
                        conn->c_xmit_sg = 0;
                        conn->c_xmit_hdr_off = 0;
                        conn->c_xmit_data_off = 0;
                        conn->c_xmit_rdma_sent = 0;
                        conn->c_xmit_atomic_sent = 0;
                        conn->c_xmit_data_sent = 0;

                        rds_message_put(rm);
                }
        }

        /* Nuke any messages we decided not to retransmit. */
        if (!list_empty(&to_be_dropped))
                rds_send_remove_from_sock(&to_be_dropped, RDS_RDMA_DROPPED);

        if (conn->c_trans->xmit_complete)
                conn->c_trans->xmit_complete(conn);

        /*
         * We might be racing with another sender who queued a message but
         * backed off on noticing that we held the c_send_lock.  If we check
         * for queued messages after dropping the lock then either we'll
         * see the queued message or the queuer will get the lock.  If we
         * notice the queued message then we trigger an immediate retry.
         *
         * We need to be careful only to do this when we stopped processing
         * the send queue because it was empty.  It's the only way we
         * stop processing the loop when the transport hasn't taken
         * responsibility for forward progress.
         */
        spin_unlock_irqrestore(&conn->c_send_lock, flags);

        if (send_quota == 0 && !was_empty) {
                /* We exhausted the send quota, but there's work left to
                 * do. Return and (re-)schedule the send worker.
                 */
                ret = -EAGAIN;
        }

        if (ret == 0 && was_empty) {
                /* A simple bit test would be way faster than taking the
                 * spin lock */
                spin_lock_irqsave(&conn->c_lock, flags);
                if (!list_empty(&conn->c_send_queue)) {
                        rds_stats_inc(s_send_lock_queue_raced);
                        ret = -EAGAIN;
                }
                spin_unlock_irqrestore(&conn->c_lock, flags);
        }
out:
        return ret;
}

static void rds_send_sndbuf_remove(struct rds_sock *rs, struct rds_message *rm)
{
        u32 len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        assert_spin_locked(&rs->rs_lock);

        BUG_ON(rs->rs_snd_bytes < len);
        rs->rs_snd_bytes -= len;

        if (rs->rs_snd_bytes == 0)
                rds_stats_inc(s_send_queue_empty);
}

static inline int rds_send_is_acked(struct rds_message *rm, u64 ack,
                                    is_acked_func is_acked)
{
        if (is_acked)
                return is_acked(rm, ack);
        return be64_to_cpu(rm->m_inc.i_hdr.h_sequence) <= ack;
}
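
/*
 * Transport example (lives in the transports, not this file): RDS/TCP
 * passes an is_acked callback that only trusts rm->m_ack_seq once
 * RDS_MSG_HAS_ACK_SEQ has been set, since TCP can ack bytes before
 * rds_send_xmit() has assigned the message's m_ack_seq.  Without a
 * callback we fall back to comparing header sequence numbers, as above.
 */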

/*
 * Returns true if there are no messages on the send and retransmit queues
 * which have a sequence number less than the given sequence number --
 * in other words, everything sent before that sequence has been acked.
 */
int rds_send_acked_before(struct rds_connection *conn, u64 seq)
{
        struct rds_message *rm, *tmp;
        int ret = 1;

        spin_lock(&conn->c_lock);

        /*
         * Both queues are kept in increasing sequence order, so checking
         * the head entry of each list is enough; hence the unconditional
         * break.
         */
        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
                        ret = 0;
                break;
        }

        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (be64_to_cpu(rm->m_inc.i_hdr.h_sequence) < seq)
                        ret = 0;
                break;
        }

        spin_unlock(&conn->c_lock);

        return ret;
}

/*
 * This is pretty similar to what happens below in the ACK
 * handling code - except that we call here as soon as we get
 * the IB send completion on the RDMA op and the accompanying
 * message.
 */
void rds_rdma_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_rdma_op *ro;
        struct rds_notifier *notifier;
        unsigned long flags;

        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ro = &rm->rdma;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ro->op_active && ro->op_notify && ro->op_notifier) {
                notifier = ro->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ro->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_rdma_send_complete);

/*
 * Just like above, except looks at atomic op
 */
void rds_atomic_send_complete(struct rds_message *rm, int status)
{
        struct rds_sock *rs = NULL;
        struct rm_atomic_op *ao;
        struct rds_notifier *notifier;
        unsigned long flags;

        /*
         * As in rds_rdma_send_complete() above, take m_rs_lock with
         * interrupts disabled, since it is also taken from completion
         * context.
         */
        spin_lock_irqsave(&rm->m_rs_lock, flags);

        ao = &rm->atomic;
        if (test_bit(RDS_MSG_ON_SOCK, &rm->m_flags) &&
            ao->op_active && ao->op_notify && ao->op_notifier) {
                notifier = ao->op_notifier;
                rs = rm->m_rs;
                sock_hold(rds_rs_to_sk(rs));

                notifier->n_status = status;
                spin_lock(&rs->rs_lock);
                list_add_tail(&notifier->n_list, &rs->rs_notify_queue);
                spin_unlock(&rs->rs_lock);

                ao->op_notifier = NULL;
        }

        spin_unlock_irqrestore(&rm->m_rs_lock, flags);

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}
EXPORT_SYMBOL_GPL(rds_atomic_send_complete);

/*
 * This is the same as rds_rdma_send_complete except we
 * don't do any locking - we have all the ingredients (message,
 * socket, socket lock) and can just move the notifier.
 */
static inline void
__rds_send_complete(struct rds_sock *rs, struct rds_message *rm, int status)
{
        struct rm_rdma_op *ro;
        struct rm_atomic_op *ao;

        ro = &rm->rdma;
        if (ro->op_active && ro->op_notify && ro->op_notifier) {
                ro->op_notifier->n_status = status;
                list_add_tail(&ro->op_notifier->n_list, &rs->rs_notify_queue);
                ro->op_notifier = NULL;
        }

        ao = &rm->atomic;
        if (ao->op_active && ao->op_notify && ao->op_notifier) {
                ao->op_notifier->n_status = status;
                list_add_tail(&ao->op_notifier->n_list, &rs->rs_notify_queue);
                ao->op_notifier = NULL;
        }

        /* No need to wake the app - caller does this */
}

/*
 * This is called from the IB send completion when we detect
 * an RDMA operation that failed with remote access error.
 * So speed is not an issue here.
 */
struct rds_message *rds_send_get_message(struct rds_connection *conn,
                                         struct rm_rdma_op *op)
{
        struct rds_message *rm, *tmp, *found = NULL;
        unsigned long flags;

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        goto out;
                }
        }

        list_for_each_entry_safe(rm, tmp, &conn->c_send_queue, m_conn_item) {
                if (&rm->rdma == op) {
                        atomic_inc(&rm->m_refcount);
                        found = rm;
                        break;
                }
        }

out:
        spin_unlock_irqrestore(&conn->c_lock, flags);

        return found;
}
EXPORT_SYMBOL_GPL(rds_send_get_message);

/*
 * This removes messages from the socket's list if they're on it.  The list
 * argument must be private to the caller, we must be able to modify it
 * without locks.  The messages must have a reference held for their
 * position on the list.  This function will drop that reference after
 * removing the messages from the 'messages' list regardless of whether it
 * found the messages on the socket list or not.
 */
void rds_send_remove_from_sock(struct list_head *messages, int status)
{
        unsigned long flags;
        struct rds_sock *rs = NULL;
        struct rds_message *rm;

        while (!list_empty(messages)) {
                int was_on_sock = 0;

                rm = list_entry(messages->next, struct rds_message,
                                m_conn_item);
                list_del_init(&rm->m_conn_item);

                /*
                 * If we see this flag cleared then we're *sure* that someone
                 * else beat us to removing it from the sock.  If we race
                 * with their flag update we'll get the lock and then really
                 * see that the flag has been cleared.
                 *
                 * The message spinlock makes sure nobody clears rm->m_rs
                 * while we're messing with it. It does not prevent the
                 * message from being removed from the socket, though.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);
                if (!test_bit(RDS_MSG_ON_SOCK, &rm->m_flags))
                        goto unlock_and_drop;

                if (rs != rm->m_rs) {
                        if (rs) {
                                rds_wake_sk_sleep(rs);
                                sock_put(rds_rs_to_sk(rs));
                        }
                        rs = rm->m_rs;
                        sock_hold(rds_rs_to_sk(rs));
                }
                spin_lock(&rs->rs_lock);

                if (test_and_clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags)) {
                        struct rm_rdma_op *ro = &rm->rdma;
                        struct rds_notifier *notifier;

                        list_del_init(&rm->m_sock_item);
                        rds_send_sndbuf_remove(rs, rm);

                        if (ro->op_active && ro->op_notifier &&
                               (ro->op_notify || (ro->op_recverr && status))) {
                                notifier = ro->op_notifier;
                                list_add_tail(&notifier->n_list,
                                                &rs->rs_notify_queue);
                                if (!notifier->n_status)
                                        notifier->n_status = status;
                                rm->rdma.op_notifier = NULL;
                        }
                        was_on_sock = 1;
                        rm->m_rs = NULL;
                }
                spin_unlock(&rs->rs_lock);

unlock_and_drop:
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);
                rds_message_put(rm);
                if (was_on_sock)
                        rds_message_put(rm);
        }

        if (rs) {
                rds_wake_sk_sleep(rs);
                sock_put(rds_rs_to_sk(rs));
        }
}

/*
 * Transports call here when they've determined that the receiver queued
 * messages up to, and including, the given sequence number.  Messages are
 * moved to the retrans queue when rds_send_xmit picks them off the send
 * queue. This means that in the TCP case, the message may not have been
 * assigned the m_ack_seq yet - but that's fine as long as tcp_is_acked
 * checks the RDS_MSG_HAS_ACK_SEQ bit.
 *
 * XXX It's not clear to me how this is safely serialized with socket
 * destruction.  Maybe it should bail if it sees SOCK_DEAD.
 */
void rds_send_drop_acked(struct rds_connection *conn, u64 ack,
                         is_acked_func is_acked)
{
        struct rds_message *rm, *tmp;
        unsigned long flags;
        LIST_HEAD(list);

        spin_lock_irqsave(&conn->c_lock, flags);

        list_for_each_entry_safe(rm, tmp, &conn->c_retrans, m_conn_item) {
                if (!rds_send_is_acked(rm, ack, is_acked))
                        break;

                list_move(&rm->m_conn_item, &list);
                clear_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        }

        /* order flag updates with spin locks */
        if (!list_empty(&list))
                smp_mb__after_clear_bit();

        spin_unlock_irqrestore(&conn->c_lock, flags);

        /* now remove the messages from the sock list as needed */
        rds_send_remove_from_sock(&list, RDS_RDMA_SUCCESS);
}
EXPORT_SYMBOL_GPL(rds_send_drop_acked);

void rds_send_drop_to(struct rds_sock *rs, struct sockaddr_in *dest)
{
        struct rds_message *rm, *tmp;
        struct rds_connection *conn;
        unsigned long flags;
        LIST_HEAD(list);

        /* get all the messages we're dropping under the rs lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        list_for_each_entry_safe(rm, tmp, &rs->rs_send_queue, m_sock_item) {
                if (dest && (dest->sin_addr.s_addr != rm->m_daddr ||
                             dest->sin_port != rm->m_inc.i_hdr.h_dport))
                        continue;

                list_move(&rm->m_sock_item, &list);
                rds_send_sndbuf_remove(rs, rm);
                clear_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
        }

        /* order flag updates with the rs lock */
        smp_mb__after_clear_bit();

        spin_unlock_irqrestore(&rs->rs_lock, flags);

        if (list_empty(&list))
                return;

        /* Remove the messages from the conn */
        list_for_each_entry(rm, &list, m_sock_item) {

                conn = rm->m_inc.i_conn;

                spin_lock_irqsave(&conn->c_lock, flags);
                /*
                 * Maybe someone else beat us to removing rm from the conn.
                 * If we race with their flag update we'll get the lock and
                 * then really see that the flag has been cleared.
                 */
                if (!test_and_clear_bit(RDS_MSG_ON_CONN, &rm->m_flags)) {
                        spin_unlock_irqrestore(&conn->c_lock, flags);
                        continue;
                }
                list_del_init(&rm->m_conn_item);
                spin_unlock_irqrestore(&conn->c_lock, flags);

                /*
                 * Couldn't grab m_rs_lock in top loop (lock ordering),
                 * but we can now.
                 */
                spin_lock_irqsave(&rm->m_rs_lock, flags);

                spin_lock(&rs->rs_lock);
                __rds_send_complete(rs, rm, RDS_RDMA_CANCELED);
                spin_unlock(&rs->rs_lock);

                rm->m_rs = NULL;
                spin_unlock_irqrestore(&rm->m_rs_lock, flags);

                rds_message_put(rm);
        }

        rds_wake_sk_sleep(rs);

        while (!list_empty(&list)) {
                rm = list_entry(list.next, struct rds_message, m_sock_item);
                list_del_init(&rm->m_sock_item);

                rds_message_wait(rm);
                rds_message_put(rm);
        }
}

/*
 * we only want this to fire once so we use the caller's 'queued'.  It's
 * possible that another thread can race with us and remove the
 * message from the flow with RDS_CANCEL_SENT_TO.
 */
static int rds_send_queue_rm(struct rds_sock *rs, struct rds_connection *conn,
                             struct rds_message *rm, __be16 sport,
                             __be16 dport, int *queued)
{
        unsigned long flags;
        u32 len;

        if (*queued)
                goto out;

        len = be32_to_cpu(rm->m_inc.i_hdr.h_len);

        /* this is the only place which holds both the socket's rs_lock
         * and the connection's c_lock */
        spin_lock_irqsave(&rs->rs_lock, flags);

        /*
         * If we checked the *new* value of rs_snd_bytes, a nearly-full
         * sndbuf would queue nothing and userspace would get -EAGAIN,
         * while poll() still indicated send room.  That can lead to bad
         * behavior (spinning) if snd_bytes isn't freed up by incoming
         * acks.  So we check the *old* value of rs_snd_bytes here to
         * allow the last msg to exceed the buffer; poll() then knows no
         * more data can be sent.
         */
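        /*
         * Worked example with made-up numbers: if rds_sk_sndbuf() is 64K
         * and 60K is already queued, a 16K message is still accepted
         * below (the *old* value, 60K, passes the check), taking
         * rs_snd_bytes to 76K; poll() then reports no send room until
         * acks drain it back under the limit.
         */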
        if (rs->rs_snd_bytes < rds_sk_sndbuf(rs)) {
                rs->rs_snd_bytes += len;

                /* let recv side know we are close to send space exhaustion.
                 * This is probably not the optimal way to do it, as this
                 * means we set the flag on *all* messages as soon as our
                 * throughput hits a certain threshold.
                 */
                if (rs->rs_snd_bytes >= rds_sk_sndbuf(rs) / 2)
                        __set_bit(RDS_MSG_ACK_REQUIRED, &rm->m_flags);

                list_add_tail(&rm->m_sock_item, &rs->rs_send_queue);
                set_bit(RDS_MSG_ON_SOCK, &rm->m_flags);
                rds_message_addref(rm);
                rm->m_rs = rs;

                /* The code ordering is a little weird, but we're
                   trying to minimize the time we hold c_lock */
                rds_message_populate_header(&rm->m_inc.i_hdr, sport, dport, 0);
                rm->m_inc.i_conn = conn;
                rds_message_addref(rm);

                spin_lock(&conn->c_lock);
                rm->m_inc.i_hdr.h_sequence = cpu_to_be64(conn->c_next_tx_seq++);
                list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
                set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
                spin_unlock(&conn->c_lock);

                rdsdebug("queued msg %p len %d, rs %p bytes %d seq %llu\n",
                         rm, len, rs, rs->rs_snd_bytes,
                         (unsigned long long)be64_to_cpu(rm->m_inc.i_hdr.h_sequence));

                *queued = 1;
        }

        spin_unlock_irqrestore(&rs->rs_lock, flags);
out:
        return *queued;
}

/*
 * rds_message is getting to be quite complicated, and we'd like to allocate
 * it all in one go. This figures out how big it needs to be up front.
 */
static int rds_rm_size(struct msghdr *msg, int data_len)
{
        struct cmsghdr *cmsg;
        int size = 0;
        int cmsg_groups = 0;
        int retval;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        cmsg_groups |= 1;
                        retval = rds_rdma_extra_size(CMSG_DATA(cmsg));
                        if (retval < 0)
                                return retval;
                        size += retval;

                        break;

                case RDS_CMSG_RDMA_DEST:
                case RDS_CMSG_RDMA_MAP:
                        cmsg_groups |= 2;
                        /* these are valid but do not add any size */
                        break;

                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                        cmsg_groups |= 1;
                        size += sizeof(struct scatterlist);
                        break;

                default:
                        return -EINVAL;
                }

        }

        size += ceil(data_len, PAGE_SIZE) * sizeof(struct scatterlist);

        /* Ensure (DEST, MAP) are never used with (ARGS, ATOMIC) */
        if (cmsg_groups == 3)
                return -EINVAL;

        return size;
}
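
/*
 * Sizing example with made-up numbers: on a 4K-page system, a 10000-byte
 * payload with one RDS_CMSG_ATOMIC_CSWP cmsg needs ceil(10000, 4096) = 3
 * scatterlist entries for the data plus one for the atomic op, so
 * rds_rm_size() returns 4 * sizeof(struct scatterlist).
 */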

static int rds_cmsg_send(struct rds_sock *rs, struct rds_message *rm,
                         struct msghdr *msg, int *allocated_mr)
{
        struct cmsghdr *cmsg;
        int ret = 0;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
                if (!CMSG_OK(msg, cmsg))
                        return -EINVAL;

                if (cmsg->cmsg_level != SOL_RDS)
                        continue;

                /* As a side effect, RDMA_DEST and RDMA_MAP will set
                 * rm->rdma.m_rdma_cookie and rm->rdma.m_rdma_mr.
                 */
                switch (cmsg->cmsg_type) {
                case RDS_CMSG_RDMA_ARGS:
                        ret = rds_cmsg_rdma_args(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_DEST:
                        ret = rds_cmsg_rdma_dest(rs, rm, cmsg);
                        break;

                case RDS_CMSG_RDMA_MAP:
                        ret = rds_cmsg_rdma_map(rs, rm, cmsg);
                        if (!ret)
                                *allocated_mr = 1;
                        break;
                case RDS_CMSG_ATOMIC_CSWP:
                case RDS_CMSG_ATOMIC_FADD:
                        ret = rds_cmsg_atomic(rs, rm, cmsg);
                        break;

                default:
                        return -EINVAL;
                }

                if (ret)
                        break;
        }

        return ret;
}
919
920 int rds_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
921                 size_t payload_len)
922 {
923         struct sock *sk = sock->sk;
924         struct rds_sock *rs = rds_sk_to_rs(sk);
925         struct sockaddr_in *usin = (struct sockaddr_in *)msg->msg_name;
926         __be32 daddr;
927         __be16 dport;
928         struct rds_message *rm = NULL;
929         struct rds_connection *conn;
930         int ret = 0;
931         int queued = 0, allocated_mr = 0;
932         int nonblock = msg->msg_flags & MSG_DONTWAIT;
933         long timeo = sock_sndtimeo(sk, nonblock);
934
935         /* Mirror Linux UDP mirror of BSD error message compatibility */
936         /* XXX: Perhaps MSG_MORE someday */
937         if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_CMSG_COMPAT)) {
938                 printk(KERN_INFO "msg_flags 0x%08X\n", msg->msg_flags);
939                 ret = -EOPNOTSUPP;
940                 goto out;
941         }
942
943         if (msg->msg_namelen) {
944                 /* XXX fail non-unicast destination IPs? */
945                 if (msg->msg_namelen < sizeof(*usin) || usin->sin_family != AF_INET) {
946                         ret = -EINVAL;
947                         goto out;
948                 }
949                 daddr = usin->sin_addr.s_addr;
950                 dport = usin->sin_port;
951         } else {
952                 /* We only care about consistency with ->connect() */
953                 lock_sock(sk);
954                 daddr = rs->rs_conn_addr;
955                 dport = rs->rs_conn_port;
956                 release_sock(sk);
957         }
958
959         /* racing with another thread binding seems ok here */
960         if (daddr == 0 || rs->rs_bound_addr == 0) {
961                 ret = -ENOTCONN; /* XXX not a great errno */
962                 goto out;
963         }
964
965         /* size of rm including all sgs */
966         ret = rds_rm_size(msg, payload_len);
967         if (ret < 0)
968                 goto out;
969
970         rm = rds_message_alloc(ret, GFP_KERNEL);
971         if (!rm) {
972                 ret = -ENOMEM;
973                 goto out;
974         }
975
976         /* Attach data to the rm */
977         if (payload_len) {
978                 rm->data.op_sg = rds_message_alloc_sgs(rm, ceil(payload_len, PAGE_SIZE));
979                 ret = rds_message_copy_from_user(rm, msg->msg_iov, payload_len);
980                 if (ret)
981                         goto out;
982         }
983         rm->data.op_active = 1;
984
985         rm->m_daddr = daddr;
986
987         /* rds_conn_create has a spinlock that runs with IRQ off.
988          * Caching the conn in the socket helps a lot. */
989         if (rs->rs_conn && rs->rs_conn->c_faddr == daddr)
990                 conn = rs->rs_conn;
991         else {
992                 conn = rds_conn_create_outgoing(rs->rs_bound_addr, daddr,
993                                         rs->rs_transport,
994                                         sock->sk->sk_allocation);
995                 if (IS_ERR(conn)) {
996                         ret = PTR_ERR(conn);
997                         goto out;
998                 }
999                 rs->rs_conn = conn;
1000         }
1001
1002         /* Parse any control messages the user may have included. */
1003         ret = rds_cmsg_send(rs, rm, msg, &allocated_mr);
1004         if (ret)
1005                 goto out;
1006
1007         if (rm->rdma.op_active && !conn->c_trans->xmit_rdma) {
1008                 if (printk_ratelimit())
1009                         printk(KERN_NOTICE "rdma_op %p conn xmit_rdma %p\n",
1010                                &rm->rdma, conn->c_trans->xmit_rdma);
1011                 ret = -EOPNOTSUPP;
1012                 goto out;
1013         }
1014
1015         if (rm->atomic.op_active && !conn->c_trans->xmit_atomic) {
1016                 if (printk_ratelimit())
1017                         printk(KERN_NOTICE "atomic_op %p conn xmit_atomic %p\n",
1018                                &rm->atomic, conn->c_trans->xmit_atomic);
1019                 ret = -EOPNOTSUPP;
1020                 goto out;
1021         }
1022
1023         /* If the connection is down, trigger a connect. We may
1024          * have scheduled a delayed reconnect however - in this case
1025          * we should not interfere.
1026          */
1027         if (rds_conn_state(conn) == RDS_CONN_DOWN &&
1028             !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
1029                 queue_delayed_work(rds_wq, &conn->c_conn_w, 0);
1030
1031         ret = rds_cong_wait(conn->c_fcong, dport, nonblock, rs);
1032         if (ret) {
1033                 rs->rs_seen_congestion = 1;
1034                 goto out;
1035         }
1036
1037         while (!rds_send_queue_rm(rs, conn, rm, rs->rs_bound_port,
1038                                   dport, &queued)) {
1039                 rds_stats_inc(s_send_queue_full);
1040                 /* XXX make sure this is reasonable */
1041                 if (payload_len > rds_sk_sndbuf(rs)) {
1042                         ret = -EMSGSIZE;
1043                         goto out;
1044                 }
1045                 if (nonblock) {
1046                         ret = -EAGAIN;
1047                         goto out;
1048                 }
1049
1050                 timeo = wait_event_interruptible_timeout(*sk_sleep(sk),
1051                                         rds_send_queue_rm(rs, conn, rm,
1052                                                           rs->rs_bound_port,
1053                                                           dport,
1054                                                           &queued),
1055                                         timeo);
1056                 rdsdebug("sendmsg woke queued %d timeo %ld\n", queued, timeo);
1057                 if (timeo > 0 || timeo == MAX_SCHEDULE_TIMEOUT)
1058                         continue;
1059
1060                 ret = timeo;
1061                 if (ret == 0)
1062                         ret = -ETIMEDOUT;
1063                 goto out;
1064         }
1065
1066         /*
1067          * By now we've committed to the send.  We reuse rds_send_worker()
1068          * to retry sends in the rds thread if the transport asks us to.
1069          */
1070         rds_stats_inc(s_send_queued);
1071
1072         if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags))
1073                 rds_send_worker(&conn->c_send_w.work);
1074
1075         rds_message_put(rm);
1076         return payload_len;
1077
1078 out:
1079         /* If the user included a RDMA_MAP cmsg, we allocated a MR on the fly.
1080          * If the sendmsg goes through, we keep the MR. If it fails with EAGAIN
1081          * or in any other way, we need to destroy the MR again */
1082         if (allocated_mr)
1083                 rds_rdma_unuse(rs, rds_rdma_cookie_key(rm->m_rdma_cookie), 1);
1084
1085         if (rm)
1086                 rds_message_put(rm);
1087         return ret;
1088 }
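
/*
 * Userspace sketch (illustrative only; error handling and real addresses
 * omitted).  RDS sends are datagram-style over a bound PF_RDS socket, so
 * the path into rds_sendmsg() above is an ordinary sendto():
 *
 *      int fd = socket(PF_RDS, SOCK_SEQPACKET, 0);
 *      struct sockaddr_in sin = {
 *              .sin_family = AF_INET,
 *              .sin_port = htons(4000),        (any 16-bit RDS port)
 *              .sin_addr.s_addr = local_ip,
 *      };
 *      bind(fd, (struct sockaddr *)&sin, sizeof(sin));
 *      sin.sin_addr.s_addr = peer_ip;
 *      sendto(fd, buf, len, 0, (struct sockaddr *)&sin, sizeof(sin));
 */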

/*
 * Reply to a ping packet.
 */
int
rds_send_pong(struct rds_connection *conn, __be16 dport)
{
        struct rds_message *rm;
        unsigned long flags;
        int ret = 0;

        rm = rds_message_alloc(0, GFP_ATOMIC);
        if (!rm) {
                ret = -ENOMEM;
                goto out;
        }

        rm->m_daddr = conn->c_faddr;

        /* If the connection is down, trigger a connect. We may
         * have scheduled a delayed reconnect however - in this case
         * we should not interfere.
         */
        if (rds_conn_state(conn) == RDS_CONN_DOWN &&
            !test_and_set_bit(RDS_RECONNECT_PENDING, &conn->c_flags))
                queue_delayed_work(rds_wq, &conn->c_conn_w, 0);

        ret = rds_cong_wait(conn->c_fcong, dport, 1, NULL);
        if (ret)
                goto out;

        spin_lock_irqsave(&conn->c_lock, flags);
        list_add_tail(&rm->m_conn_item, &conn->c_send_queue);
        set_bit(RDS_MSG_ON_CONN, &rm->m_flags);
        rds_message_addref(rm);
        rm->m_inc.i_conn = conn;

        rds_message_populate_header(&rm->m_inc.i_hdr, 0, dport,
                                    conn->c_next_tx_seq);
        conn->c_next_tx_seq++;
        spin_unlock_irqrestore(&conn->c_lock, flags);

        rds_stats_inc(s_send_queued);
        rds_stats_inc(s_send_pong);

        queue_delayed_work(rds_wq, &conn->c_send_w, 0);
        rds_message_put(rm);
        return 0;

out:
        if (rm)
                rds_message_put(rm);
        return ret;
}