#include "ceph_debug.h"

#include <linux/crc32c.h>
#include <linux/ctype.h>
#include <linux/highmem.h>
#include <linux/inet.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/socket.h>
#include <linux/string.h>
#include <linux/bio.h>
#include <linux/blkdev.h>

#include "messenger.h"
/*
 * Ceph uses the messenger to exchange ceph_msg messages with other
 * hosts in the system.  The messenger provides ordered and reliable
 * delivery.  We tolerate TCP disconnects by reconnecting (with
 * exponential backoff) in the case of a fault (disconnection, bad
 * crc, protocol error).  Acks allow sent messages to be discarded by
 * the sender.
 */
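/*
 * A minimal standalone sketch of the reconnect backoff mentioned
 * above: the retry delay starts at a base interval and doubles per
 * consecutive fault until capped.  The constants here are illustrative
 * stand-ins for BASE_DELAY_INTERVAL/MAX_DELAY_INTERVAL in messenger.h,
 * not their real values.
 */
#if 0	/* example only, not built */
#include <stdio.h>

#define EXAMPLE_BASE_DELAY	1	/* stand-in for BASE_DELAY_INTERVAL */
#define EXAMPLE_MAX_DELAY	64	/* stand-in for MAX_DELAY_INTERVAL */

int main(void)
{
	unsigned long delay = 0;
	int fault;

	/* same shape as the delay update in ceph_fault() below */
	for (fault = 1; fault <= 8; fault++) {
		if (delay == 0)
			delay = EXAMPLE_BASE_DELAY;
		else if (delay < EXAMPLE_MAX_DELAY)
			delay *= 2;
		printf("fault %d -> retry in %lu ticks\n", fault, delay);
	}
	return 0;
}
#endif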
/* static tag bytes (protocol control messages) */
static char tag_msg = CEPH_MSGR_TAG_MSG;
static char tag_ack = CEPH_MSGR_TAG_ACK;
static char tag_keepalive = CEPH_MSGR_TAG_KEEPALIVE;

#ifdef CONFIG_LOCKDEP
static struct lock_class_key socket_class;
#endif

static void queue_con(struct ceph_connection *con);
static void con_work(struct work_struct *);
static void ceph_fault(struct ceph_connection *con);
/*
 * nicely render a sockaddr as a string.
 */
#define MAX_ADDR_STR 20
#define MAX_ADDR_STR_LEN 60
static char addr_str[MAX_ADDR_STR][MAX_ADDR_STR_LEN];
static DEFINE_SPINLOCK(addr_str_lock);
static int last_addr_str;
const char *pr_addr(const struct sockaddr_storage *ss)
{
	int i;
	char *s;
	struct sockaddr_in *in4 = (void *)ss;
	struct sockaddr_in6 *in6 = (void *)ss;

	spin_lock(&addr_str_lock);
	i = last_addr_str++;
	if (last_addr_str == MAX_ADDR_STR)
		last_addr_str = 0;
	spin_unlock(&addr_str_lock);
	s = addr_str[i];

	switch (ss->ss_family) {
	case AF_INET:
		snprintf(s, MAX_ADDR_STR_LEN, "%pI4:%u", &in4->sin_addr,
			 (unsigned int)ntohs(in4->sin_port));
		break;

	case AF_INET6:
		snprintf(s, MAX_ADDR_STR_LEN, "[%pI6c]:%u", &in6->sin6_addr,
			 (unsigned int)ntohs(in6->sin6_port));
		break;

	default:
		snprintf(s, MAX_ADDR_STR_LEN, "(unknown sockaddr family %d)",
			 (int)ss->ss_family);
	}

	return s;
}
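/*
 * A standalone userspace sketch of the rotating static-buffer idiom
 * pr_addr() uses: because results live in a small ring of buffers, up
 * to MAX_ADDR_STR formatted strings can coexist, so several calls can
 * appear in a single printk.  All names here are illustrative.
 */
#if 0	/* example only, not built */
#include <stdio.h>

#define NBUF 4
static char bufs[NBUF][32];
static int last;

static const char *fmt_num(int v)
{
	char *s = bufs[last];	/* no locking: single-threaded demo */

	last = (last + 1) % NBUF;
	snprintf(s, sizeof(bufs[0]), "<%d>", v);
	return s;
}

int main(void)
{
	/* two live results in one call, as pr_addr() permits in printk */
	printf("%s %s\n", fmt_num(1), fmt_num(2));
	return 0;
}
#endif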
static void encode_my_addr(struct ceph_messenger *msgr)
{
	memcpy(&msgr->my_enc_addr, &msgr->inst.addr, sizeof(msgr->my_enc_addr));
	ceph_encode_addr(&msgr->my_enc_addr);
}
/*
 * work queue for all reading and writing to/from the socket.
 */
struct workqueue_struct *ceph_msgr_wq;

int __init ceph_msgr_init(void)
{
	/* create_workqueue() returns NULL on failure, not an ERR_PTR */
	ceph_msgr_wq = create_workqueue("ceph-msgr");
	if (!ceph_msgr_wq) {
		pr_err("msgr_init failed to create workqueue\n");
		return -ENOMEM;
	}
	return 0;
}

void ceph_msgr_exit(void)
{
	destroy_workqueue(ceph_msgr_wq);
}

void ceph_msgr_flush(void)
{
	flush_workqueue(ceph_msgr_wq);
}
/*
 * socket callback functions
 */

/* data available on socket, or listen socket received a connect */
static void ceph_data_ready(struct sock *sk, int count_unused)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	if (sk->sk_state != TCP_CLOSE_WAIT) {
		dout("ceph_data_ready on %p state = %lu, queueing work\n",
		     con, con->state);
		queue_con(con);
	}
}

/* socket has buffer space for writing */
static void ceph_write_space(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	/* only queue to workqueue if there is data we want to write. */
	if (test_bit(WRITE_PENDING, &con->state)) {
		dout("ceph_write_space %p queueing write work\n", con);
		queue_con(con);
	} else {
		dout("ceph_write_space %p nothing to write\n", con);
	}

	/* since we have our own write_space, clear the SOCK_NOSPACE flag */
	clear_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
}

/* socket's state has changed */
static void ceph_state_change(struct sock *sk)
{
	struct ceph_connection *con =
		(struct ceph_connection *)sk->sk_user_data;

	dout("ceph_state_change %p state = %lu sk_state = %u\n",
	     con, con->state, sk->sk_state);

	if (test_bit(CLOSED, &con->state))
		return;

	switch (sk->sk_state) {
	case TCP_CLOSE:
		dout("ceph_state_change TCP_CLOSE\n");
	case TCP_CLOSE_WAIT:
		dout("ceph_state_change TCP_CLOSE_WAIT\n");
		if (test_and_set_bit(SOCK_CLOSED, &con->state) == 0) {
			if (test_bit(CONNECTING, &con->state))
				con->error_msg = "connection failed";
			else
				con->error_msg = "socket closed";
			queue_con(con);
		}
		break;
	case TCP_ESTABLISHED:
		dout("ceph_state_change TCP_ESTABLISHED\n");
		queue_con(con);
		break;
	}
}
/*
 * set up socket callbacks
 */
static void set_sock_callbacks(struct socket *sock,
			       struct ceph_connection *con)
{
	struct sock *sk = sock->sk;

	sk->sk_user_data = (void *)con;
	sk->sk_data_ready = ceph_data_ready;
	sk->sk_write_space = ceph_write_space;
	sk->sk_state_change = ceph_state_change;
}
/*
 * initiate connection to a remote socket.
 */
static struct socket *ceph_tcp_connect(struct ceph_connection *con)
{
	struct sockaddr_storage *paddr = &con->peer_addr.in_addr;
	struct socket *sock;
	int ret;

	ret = sock_create_kern(con->peer_addr.in_addr.ss_family, SOCK_STREAM,
			       IPPROTO_TCP, &sock);
	if (ret)
		return ERR_PTR(ret);
	con->sock = sock;
	sock->sk->sk_allocation = GFP_NOFS;

#ifdef CONFIG_LOCKDEP
	lockdep_set_class(&sock->sk->sk_lock, &socket_class);
#endif

	set_sock_callbacks(sock, con);

	dout("connect %s\n", pr_addr(&con->peer_addr.in_addr));

	ret = sock->ops->connect(sock, (struct sockaddr *)paddr, sizeof(*paddr),
				 O_NONBLOCK);
	if (ret == -EINPROGRESS) {
		dout("connect %s EINPROGRESS sk_state = %u\n",
		     pr_addr(&con->peer_addr.in_addr),
		     sock->sk->sk_state);
		ret = 0;
	}
	if (ret < 0) {
		pr_err("connect %s error %d\n",
		       pr_addr(&con->peer_addr.in_addr), ret);
		sock_release(sock);
		con->sock = NULL;
		con->error_msg = "connect error";
		return ERR_PTR(ret);
	}
	return sock;
}
static int ceph_tcp_recvmsg(struct socket *sock, void *buf, size_t len)
{
	struct kvec iov = {buf, len};
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	return kernel_recvmsg(sock, &msg, &iov, 1, len, msg.msg_flags);
}

/*
 * write something.  @more is true if caller will be sending more data
 * shortly.
 */
static int ceph_tcp_sendmsg(struct socket *sock, struct kvec *iov,
			    size_t kvlen, size_t len, int more)
{
	struct msghdr msg = { .msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL };

	if (more)
		msg.msg_flags |= MSG_MORE;
	else
		msg.msg_flags |= MSG_EOR;  /* superfluous, but what the hell */

	return kernel_sendmsg(sock, &msg, iov, kvlen, len);
}
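/*
 * The same pattern, sketched for userspace: gather several buffers
 * into one sendmsg() call and set MSG_MORE when the caller knows more
 * data follows, so the kernel may coalesce TCP segments.  Purely
 * illustrative; the fd is assumed to be a connected TCP socket.
 */
#if 0	/* example only, not built */
#include <sys/socket.h>
#include <sys/uio.h>

static ssize_t send_iovec(int fd, struct iovec *iov, size_t iovlen, int more)
{
	struct msghdr msg = { 0 };

	msg.msg_iov = iov;
	msg.msg_iovlen = iovlen;
	return sendmsg(fd, &msg,
		       MSG_DONTWAIT | MSG_NOSIGNAL | (more ? MSG_MORE : 0));
}
#endif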
/*
 * Shutdown/close the socket for the given connection.
 */
static int con_close_socket(struct ceph_connection *con)
{
	int rc;

	dout("con_close_socket on %p sock %p\n", con, con->sock);
	if (!con->sock)
		return 0;
	set_bit(SOCK_CLOSED, &con->state);
	rc = con->sock->ops->shutdown(con->sock, SHUT_RDWR);
	sock_release(con->sock);
	con->sock = NULL;
	clear_bit(SOCK_CLOSED, &con->state);
	return rc;
}
/*
 * Reset a connection.  Discard all incoming and outgoing messages
 * and clear *_seq state.
 */
static void ceph_msg_remove(struct ceph_msg *msg)
{
	list_del_init(&msg->list_head);
	ceph_msg_put(msg);
}

static void ceph_msg_remove_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct ceph_msg *msg = list_first_entry(head, struct ceph_msg,
							list_head);
		ceph_msg_remove(msg);
	}
}

static void reset_connection(struct ceph_connection *con)
{
	/* reset connection, out_queue, msg_ and connect_seq */
	/* discard existing out_queue and msg_seq */
	ceph_msg_remove_list(&con->out_queue);
	ceph_msg_remove_list(&con->out_sent);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	con->connect_seq = 0;
	con->out_seq = 0;
	if (con->out_msg) {
		ceph_msg_put(con->out_msg);
		con->out_msg = NULL;
	}
	con->out_keepalive_pending = false;
	con->in_seq = 0;
	con->in_seq_acked = 0;
}
/*
 * mark a peer down.  drop any open connections.
 */
void ceph_con_close(struct ceph_connection *con)
{
	dout("con_close %p peer %s\n", con, pr_addr(&con->peer_addr.in_addr));
	set_bit(CLOSED, &con->state);       /* in case there's queued work */
	clear_bit(STANDBY, &con->state);    /* avoid connect_seq bump */
	clear_bit(LOSSYTX, &con->state);    /* so we retry next connect */
	clear_bit(KEEPALIVE_PENDING, &con->state);
	clear_bit(WRITE_PENDING, &con->state);
	mutex_lock(&con->mutex);
	reset_connection(con);
	con->peer_global_seq = 0;
	cancel_delayed_work(&con->work);
	mutex_unlock(&con->mutex);
	queue_con(con);
}
/*
 * Reopen a closed connection, with a new peer address.
 */
void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
{
	dout("con_open %p %s\n", con, pr_addr(&addr->in_addr));
	set_bit(OPENING, &con->state);
	clear_bit(CLOSED, &con->state);
	memcpy(&con->peer_addr, addr, sizeof(*addr));
	con->delay = 0;      /* reset backoff memory */
	queue_con(con);
}

/*
 * return true if this connection ever successfully opened
 */
bool ceph_con_opened(struct ceph_connection *con)
{
	return con->connect_seq > 0;
}
/*
 * generic get/put
 */
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
{
	dout("con_get %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) + 1);
	if (atomic_inc_not_zero(&con->nref))
		return con;
	return NULL;
}

void ceph_con_put(struct ceph_connection *con)
{
	dout("con_put %p nref = %d -> %d\n", con,
	     atomic_read(&con->nref), atomic_read(&con->nref) - 1);
	BUG_ON(atomic_read(&con->nref) == 0);
	if (atomic_dec_and_test(&con->nref)) {
		BUG_ON(con->sock);
		kfree(con);
	}
}
/*
 * initialize a new connection.
 */
void ceph_con_init(struct ceph_messenger *msgr, struct ceph_connection *con)
{
	dout("con_init %p\n", con);
	memset(con, 0, sizeof(*con));
	atomic_set(&con->nref, 1);
	con->msgr = msgr;
	mutex_init(&con->mutex);
	INIT_LIST_HEAD(&con->out_queue);
	INIT_LIST_HEAD(&con->out_sent);
	INIT_DELAYED_WORK(&con->work, con_work);
}
/*
 * We maintain a global counter to order connection attempts.  Get
 * a unique seq greater than @gt.
 */
static u32 get_global_seq(struct ceph_messenger *msgr, u32 gt)
{
	u32 ret;

	spin_lock(&msgr->global_seq_lock);
	if (msgr->global_seq < gt)
		msgr->global_seq = gt;
	ret = ++msgr->global_seq;
	spin_unlock(&msgr->global_seq_lock);
	return ret;
}
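/*
 * The same "unique and strictly greater than @gt" rule, sketched as a
 * standalone userspace function using C11 atomics instead of a
 * spinlock.  Illustrative only; not how this file implements it.
 */
#if 0	/* example only, not built */
#include <stdatomic.h>

static _Atomic unsigned int global_seq;

static unsigned int example_get_global_seq(unsigned int gt)
{
	unsigned int old = atomic_load(&global_seq);

	/* raise the counter to at least @gt, then take the next value */
	while (old < gt &&
	       !atomic_compare_exchange_weak(&global_seq, &old, gt))
		;
	return atomic_fetch_add(&global_seq, 1) + 1;
}
#endif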
/*
 * Prepare footer for currently outgoing message, and finish things
 * off.  Assumes out_kvec* are already valid; we just add on to the end.
 */
static void prepare_write_message_footer(struct ceph_connection *con, int v)
{
	struct ceph_msg *m = con->out_msg;

	dout("prepare_write_message_footer %p\n", con);
	con->out_kvec_is_msg = true;
	con->out_kvec[v].iov_base = &m->footer;
	con->out_kvec[v].iov_len = sizeof(m->footer);
	con->out_kvec_bytes += sizeof(m->footer);
	con->out_kvec_left++;
	con->out_more = m->more_to_follow;
	con->out_msg_done = true;
}
/*
 * Prepare headers for the next outgoing message.
 */
static void prepare_write_message(struct ceph_connection *con)
{
	struct ceph_msg *m;
	int v = 0;

	con->out_kvec_bytes = 0;
	con->out_kvec_is_msg = true;
	con->out_msg_done = false;

	/* Sneak an ack in there first?  If we can get it into the same
	 * TCP packet that's a good thing. */
	if (con->in_seq > con->in_seq_acked) {
		con->in_seq_acked = con->in_seq;
		con->out_kvec[v].iov_base = &tag_ack;
		con->out_kvec[v++].iov_len = 1;
		con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
		con->out_kvec[v].iov_base = &con->out_temp_ack;
		con->out_kvec[v++].iov_len = sizeof(con->out_temp_ack);
		con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	}

	m = list_first_entry(&con->out_queue,
			     struct ceph_msg, list_head);
	con->out_msg = m;
	if (test_bit(LOSSYTX, &con->state)) {
		list_del_init(&m->list_head);
	} else {
		/* put message on sent list */
		ceph_msg_get(m);
		list_move_tail(&m->list_head, &con->out_sent);
	}

	/*
	 * only assign outgoing seq # if we haven't sent this message
	 * yet.  if it is requeued, resend with its original seq.
	 */
	if (m->needs_out_seq) {
		m->hdr.seq = cpu_to_le64(++con->out_seq);
		m->needs_out_seq = false;
	}

	dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
	     m, con->out_seq, le16_to_cpu(m->hdr.type),
	     le32_to_cpu(m->hdr.front_len), le32_to_cpu(m->hdr.middle_len),
	     le32_to_cpu(m->hdr.data_len),
	     m->nr_pages);
	BUG_ON(le32_to_cpu(m->hdr.front_len) != m->front.iov_len);

	/* tag + hdr + front + middle */
	con->out_kvec[v].iov_base = &tag_msg;
	con->out_kvec[v++].iov_len = 1;
	con->out_kvec[v].iov_base = &m->hdr;
	con->out_kvec[v++].iov_len = sizeof(m->hdr);
	con->out_kvec[v++] = m->front;
	if (m->middle)
		con->out_kvec[v++] = m->middle->vec;
	con->out_kvec_left = v;
	con->out_kvec_bytes += 1 + sizeof(m->hdr) + m->front.iov_len +
		(m->middle ? m->middle->vec.iov_len : 0);
	con->out_kvec_cur = con->out_kvec;

	/* fill in crc (except data pages), footer */
	con->out_msg->hdr.crc =
		cpu_to_le32(crc32c(0, (void *)&m->hdr,
				   sizeof(m->hdr) - sizeof(m->hdr.crc)));
	con->out_msg->footer.flags = CEPH_MSG_FOOTER_COMPLETE;
	con->out_msg->footer.front_crc =
		cpu_to_le32(crc32c(0, m->front.iov_base, m->front.iov_len));
	if (m->middle)
		con->out_msg->footer.middle_crc =
			cpu_to_le32(crc32c(0, m->middle->vec.iov_base,
					   m->middle->vec.iov_len));
	else
		con->out_msg->footer.middle_crc = 0;
	con->out_msg->footer.data_crc = 0;
	dout("prepare_write_message front_crc %u middle_crc %u\n",
	     le32_to_cpu(con->out_msg->footer.front_crc),
	     le32_to_cpu(con->out_msg->footer.middle_crc));

	/* is there a data payload? */
	if (le32_to_cpu(m->hdr.data_len) > 0) {
		/* initialize page iterator */
		con->out_msg_pos.page = 0;
		if (m->pages)
			con->out_msg_pos.page_pos =
				le16_to_cpu(m->hdr.data_off) & ~PAGE_MASK;
		else
			con->out_msg_pos.page_pos = 0;
		con->out_msg_pos.data_pos = 0;
		con->out_msg_pos.did_page_crc = 0;
		con->out_more = 1;  /* data + footer will follow */
	} else {
		/* no, queue up footer too and be done */
		prepare_write_message_footer(con, v);
	}

	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare an ack.
 */
static void prepare_write_ack(struct ceph_connection *con)
{
	dout("prepare_write_ack %p %llu -> %llu\n", con,
	     con->in_seq_acked, con->in_seq);
	con->in_seq_acked = con->in_seq;

	con->out_kvec[0].iov_base = &tag_ack;
	con->out_kvec[0].iov_len = 1;
	con->out_temp_ack = cpu_to_le64(con->in_seq_acked);
	con->out_kvec[1].iov_base = &con->out_temp_ack;
	con->out_kvec[1].iov_len = sizeof(con->out_temp_ack);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = 1 + sizeof(con->out_temp_ack);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 1;  /* more will follow.. eventually.. */
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Prepare to write keepalive byte.
 */
static void prepare_write_keepalive(struct ceph_connection *con)
{
	dout("prepare_write_keepalive %p\n", con);
	con->out_kvec[0].iov_base = &tag_keepalive;
	con->out_kvec[0].iov_len = 1;
	con->out_kvec_left = 1;
	con->out_kvec_bytes = 1;
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);
}
/*
 * Connection negotiation.
 */
static void prepare_connect_authorizer(struct ceph_connection *con)
{
	void *auth_buf;
	int auth_len = 0;
	int auth_protocol = 0;

	mutex_unlock(&con->mutex);
	if (con->ops->get_authorizer)
		con->ops->get_authorizer(con, &auth_buf, &auth_len,
					 &auth_protocol, &con->auth_reply_buf,
					 &con->auth_reply_buf_len,
					 con->auth_retry);
	mutex_lock(&con->mutex);

	con->out_connect.authorizer_protocol = cpu_to_le32(auth_protocol);
	con->out_connect.authorizer_len = cpu_to_le32(auth_len);

	con->out_kvec[con->out_kvec_left].iov_base = auth_buf;
	con->out_kvec[con->out_kvec_left].iov_len = auth_len;
	con->out_kvec_left++;
	con->out_kvec_bytes += auth_len;
}
/*
 * We connected to a peer and are saying hello.
 */
static void prepare_write_banner(struct ceph_messenger *msgr,
				 struct ceph_connection *con)
{
	int len = strlen(CEPH_BANNER);

	con->out_kvec[0].iov_base = CEPH_BANNER;
	con->out_kvec[0].iov_len = len;
	con->out_kvec[1].iov_base = &msgr->my_enc_addr;
	con->out_kvec[1].iov_len = sizeof(msgr->my_enc_addr);
	con->out_kvec_left = 2;
	con->out_kvec_bytes = len + sizeof(msgr->my_enc_addr);
	con->out_kvec_cur = con->out_kvec;
	con->out_more = 0;
	set_bit(WRITE_PENDING, &con->state);
}
static void prepare_write_connect(struct ceph_messenger *msgr,
				  struct ceph_connection *con,
				  int after_banner)
{
	unsigned global_seq = get_global_seq(con->msgr, 0);
	int proto;

	switch (con->peer_name.type) {
	case CEPH_ENTITY_TYPE_MON:
		proto = CEPH_MONC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_OSD:
		proto = CEPH_OSDC_PROTOCOL;
		break;
	case CEPH_ENTITY_TYPE_MDS:
		proto = CEPH_MDSC_PROTOCOL;
		break;
	default:
		BUG();
	}

	dout("prepare_write_connect %p cseq=%d gseq=%d proto=%d\n", con,
	     con->connect_seq, global_seq, proto);

	con->out_connect.features = cpu_to_le64(CEPH_FEATURE_SUPPORTED);
	con->out_connect.host_type = cpu_to_le32(CEPH_ENTITY_TYPE_CLIENT);
	con->out_connect.connect_seq = cpu_to_le32(con->connect_seq);
	con->out_connect.global_seq = cpu_to_le32(global_seq);
	con->out_connect.protocol_version = cpu_to_le32(proto);
	con->out_connect.flags = 0;

	if (!after_banner) {
		con->out_kvec_left = 0;
		con->out_kvec_bytes = 0;
	}
	con->out_kvec[con->out_kvec_left].iov_base = &con->out_connect;
	con->out_kvec[con->out_kvec_left].iov_len = sizeof(con->out_connect);
	con->out_kvec_left++;
	con->out_kvec_bytes += sizeof(con->out_connect);
	con->out_kvec_cur = con->out_kvec;
	set_bit(WRITE_PENDING, &con->state);

	prepare_connect_authorizer(con);
}
/*
 * write as much of pending kvecs to the socket as we can.
 *  1 -> done
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_kvec(struct ceph_connection *con)
{
	int ret;

	dout("write_partial_kvec %p %d left\n", con, con->out_kvec_bytes);
	while (con->out_kvec_bytes > 0) {
		ret = ceph_tcp_sendmsg(con->sock, con->out_kvec_cur,
				       con->out_kvec_left, con->out_kvec_bytes,
				       con->out_more);
		if (ret <= 0)
			goto out;
		con->out_kvec_bytes -= ret;
		if (con->out_kvec_bytes == 0)
			break;            /* done */
		while (ret > 0) {
			if (ret >= con->out_kvec_cur->iov_len) {
				ret -= con->out_kvec_cur->iov_len;
				con->out_kvec_cur++;
				con->out_kvec_left--;
			} else {
				con->out_kvec_cur->iov_len -= ret;
				con->out_kvec_cur->iov_base += ret;
				ret = 0;
			}
		}
	}
	con->out_kvec_left = 0;
	con->out_kvec_is_msg = false;
	ret = 1;
out:
	dout("write_partial_kvec %p %d left in %d kvecs ret = %d\n", con,
	     con->out_kvec_bytes, con->out_kvec_left, ret);
	return ret;  /* done! */
}
static void init_bio_iter(struct bio *bio, struct bio **iter, int *seg)
{
	if (!bio) {
		*iter = NULL;
		*seg = 0;
		return;
	}
	*iter = bio;
	*seg = bio->bi_idx;
}

static void iter_bio_next(struct bio **bio_iter, int *seg)
{
	if (*bio_iter == NULL)
		return;

	BUG_ON(*seg >= (*bio_iter)->bi_vcnt);

	(*seg)++;
	if (*seg == (*bio_iter)->bi_vcnt)
		init_bio_iter((*bio_iter)->bi_next, bio_iter, seg);
}
/*
 * Write as much message data payload as we can.  If we finish, queue
 * up the footer.
 *  1 -> done, footer is now queued in out_kvec[].
 *  0 -> socket full, but more to do
 * <0 -> error
 */
static int write_partial_msg_pages(struct ceph_connection *con)
{
	struct ceph_msg *msg = con->out_msg;
	unsigned data_len = le32_to_cpu(msg->hdr.data_len);
	size_t len;
	int crc = !con->msgr->nocrc;	/* compute data crc unless disabled */
	int ret;
	int total_max_write;
	int in_trail = 0;
	size_t trail_len = (msg->trail ? msg->trail->length : 0);

	dout("write_partial_msg_pages %p msg %p page %d/%d offset %d\n",
	     con, con->out_msg, con->out_msg_pos.page, con->out_msg->nr_pages,
	     con->out_msg_pos.page_pos);

	if (msg->bio && !msg->bio_iter)
		init_bio_iter(msg->bio, &msg->bio_iter, &msg->bio_seg);

	while (data_len > con->out_msg_pos.data_pos) {
		struct page *page = NULL;
		void *kaddr = NULL;
		int max_write = PAGE_SIZE;
		int page_shift = 0;

		total_max_write = data_len - trail_len -
			con->out_msg_pos.data_pos;

		/*
		 * if we are calculating the data crc (the default), we need
		 * to map the page.  if our pages[] has been revoked, use the
		 * zero page.
		 */

		/* have we reached the trail part of the data? */
		if (con->out_msg_pos.data_pos >= data_len - trail_len) {
			in_trail = 1;

			total_max_write = data_len - con->out_msg_pos.data_pos;

			page = list_first_entry(&msg->trail->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
			max_write = PAGE_SIZE;
		} else if (msg->pages) {
			page = msg->pages[con->out_msg_pos.page];
			if (crc)
				kaddr = kmap(page);
		} else if (msg->pagelist) {
			page = list_first_entry(&msg->pagelist->head,
						struct page, lru);
			if (crc)
				kaddr = kmap(page);
		} else if (msg->bio) {
			struct bio_vec *bv;

			bv = bio_iovec_idx(msg->bio_iter, msg->bio_seg);
			page = bv->bv_page;
			page_shift = bv->bv_offset;
			if (crc)
				kaddr = kmap(page) + page_shift;
			max_write = bv->bv_len;
		} else {
			page = con->msgr->zero_page;
			if (crc)
				kaddr = page_address(con->msgr->zero_page);
		}
		len = min_t(int, max_write - con->out_msg_pos.page_pos,
			    total_max_write);

		if (crc && !con->out_msg_pos.did_page_crc) {
			void *base = kaddr + con->out_msg_pos.page_pos;
			u32 tmpcrc = le32_to_cpu(con->out_msg->footer.data_crc);

			BUG_ON(kaddr == NULL);
			con->out_msg->footer.data_crc =
				cpu_to_le32(crc32c(tmpcrc, base, len));
			con->out_msg_pos.did_page_crc = 1;
		}
		ret = kernel_sendpage(con->sock, page,
				      con->out_msg_pos.page_pos + page_shift,
				      len,
				      MSG_DONTWAIT | MSG_NOSIGNAL |
				      MSG_MORE);

		if (crc &&
		    (msg->pages || msg->pagelist || msg->bio || in_trail))
			kunmap(page);

		if (ret <= 0)
			goto out;

		con->out_msg_pos.data_pos += ret;
		con->out_msg_pos.page_pos += ret;
		if (ret == len) {
			con->out_msg_pos.page_pos = 0;
			con->out_msg_pos.page++;
			con->out_msg_pos.did_page_crc = 0;
			if (in_trail)
				list_move_tail(&page->lru,
					       &msg->trail->head);
			else if (msg->pagelist)
				list_move_tail(&page->lru,
					       &msg->pagelist->head);
			else if (msg->bio)
				iter_bio_next(&msg->bio_iter, &msg->bio_seg);
		}
	}

	dout("write_partial_msg_pages %p msg %p done\n", con, msg);

	/* prepare and queue up footer, too */
	if (!crc)
		con->out_msg->footer.flags |= CEPH_MSG_FOOTER_NOCRC;
	con->out_kvec_bytes = 0;
	con->out_kvec_left = 0;
	con->out_kvec_cur = con->out_kvec;
	prepare_write_message_footer(con, 0);
	ret = 1;
out:
	return ret;
}
/*
 * write some zeros
 */
static int write_partial_skip(struct ceph_connection *con)
{
	int ret;

	while (con->out_skip > 0) {
		struct kvec iov = {
			.iov_base = page_address(con->msgr->zero_page),
			.iov_len = min(con->out_skip, (int)PAGE_CACHE_SIZE)
		};

		ret = ceph_tcp_sendmsg(con->sock, &iov, 1, iov.iov_len, 1);
		if (ret <= 0)
			goto out;
		con->out_skip -= ret;
	}
	ret = 1;
out:
	return ret;
}
/*
 * Prepare to read connection handshake, or an ack.
 */
static void prepare_read_banner(struct ceph_connection *con)
{
	dout("prepare_read_banner %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_connect(struct ceph_connection *con)
{
	dout("prepare_read_connect %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_ack(struct ceph_connection *con)
{
	dout("prepare_read_ack %p\n", con);
	con->in_base_pos = 0;
}

static void prepare_read_tag(struct ceph_connection *con)
{
	dout("prepare_read_tag %p\n", con);
	con->in_base_pos = 0;
	con->in_tag = CEPH_MSGR_TAG_READY;
}

/*
 * Prepare to read a message.
 */
static int prepare_read_message(struct ceph_connection *con)
{
	dout("prepare_read_message %p\n", con);
	BUG_ON(con->in_msg != NULL);
	con->in_base_pos = 0;
	con->in_front_crc = con->in_middle_crc = con->in_data_crc = 0;
	return 0;
}
static int read_partial(struct ceph_connection *con,
			int *to, int size, void *object)
{
	*to += size;
	while (con->in_base_pos < *to) {
		int left = *to - con->in_base_pos;
		int have = size - left;
		int ret = ceph_tcp_recvmsg(con->sock, object + have, left);

		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	return 1;
}
/*
 * Read all or part of the connect-side handshake on a new connection
 */
static int read_partial_banner(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_banner %p at %d\n", con, con->in_base_pos);

	/* peer's banner */
	ret = read_partial(con, &to, strlen(CEPH_BANNER), con->in_banner);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->actual_peer_addr),
			   &con->actual_peer_addr);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, sizeof(con->peer_addr_for_me),
			   &con->peer_addr_for_me);
out:
	return ret;
}

static int read_partial_connect(struct ceph_connection *con)
{
	int ret, to = 0;

	dout("read_partial_connect %p at %d\n", con, con->in_base_pos);

	ret = read_partial(con, &to, sizeof(con->in_reply), &con->in_reply);
	if (ret <= 0)
		goto out;
	ret = read_partial(con, &to, le32_to_cpu(con->in_reply.authorizer_len),
			   con->auth_reply_buf);
	if (ret <= 0)
		goto out;

	dout("read_partial_connect %p tag %d, con_seq = %u, g_seq = %u\n",
	     con, (int)con->in_reply.tag,
	     le32_to_cpu(con->in_reply.connect_seq),
	     le32_to_cpu(con->in_reply.global_seq));
out:
	return ret;
}
/*
 * Verify the hello banner looks okay.
 */
static int verify_hello(struct ceph_connection *con)
{
	if (memcmp(con->in_banner, CEPH_BANNER, strlen(CEPH_BANNER))) {
		pr_err("connect to %s got bad banner\n",
		       pr_addr(&con->peer_addr.in_addr));
		con->error_msg = "protocol error, bad banner";
		return -1;
	}
	return 0;
}
static bool addr_is_blank(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((struct sockaddr_in *)ss)->sin_addr.s_addr == 0;
	case AF_INET6:
		return
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[0] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[1] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[2] == 0 &&
		     ((struct sockaddr_in6 *)ss)->sin6_addr.s6_addr32[3] == 0;
	}
	return false;
}

static int addr_port(struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ntohs(((struct sockaddr_in *)ss)->sin_port);
	case AF_INET6:
		return ntohs(((struct sockaddr_in6 *)ss)->sin6_port);
	}
	return 0;
}

static void addr_set_port(struct sockaddr_storage *ss, int p)
{
	switch (ss->ss_family) {
	case AF_INET:
		((struct sockaddr_in *)ss)->sin_port = htons(p);
		break;
	case AF_INET6:
		((struct sockaddr_in6 *)ss)->sin6_port = htons(p);
		break;
	}
}
/*
 * Parse an ip[:port] list into an addr array.  Use the default
 * monitor port if a port isn't specified.
 */
int ceph_parse_ips(const char *c, const char *end,
		   struct ceph_entity_addr *addr,
		   int max_count, int *count)
{
	int i;
	const char *p = c;

	dout("parse_ips on '%.*s'\n", (int)(end-c), c);
	for (i = 0; i < max_count; i++) {
		const char *ipend;
		struct sockaddr_storage *ss = &addr[i].in_addr;
		struct sockaddr_in *in4 = (void *)ss;
		struct sockaddr_in6 *in6 = (void *)ss;
		int port;
		char delim = ',';

		if (*p == '[') {
			delim = ']';
			p++;
		}

		memset(ss, 0, sizeof(*ss));
		if (in4_pton(p, end - p, (u8 *)&in4->sin_addr.s_addr,
			     delim, &ipend))
			ss->ss_family = AF_INET;
		else if (in6_pton(p, end - p, (u8 *)&in6->sin6_addr.s6_addr,
				  delim, &ipend))
			ss->ss_family = AF_INET6;
		else
			goto bad;
		p = ipend;

		if (delim == ']') {
			if (*p != ']') {
				dout("missing matching ']'\n");
				goto bad;
			}
			p++;
		}

		/* port? */
		if (p < end && *p == ':') {
			port = 0;
			p++;
			while (p < end && *p >= '0' && *p <= '9') {
				port = (port * 10) + (*p - '0');
				p++;
			}
			if (port > 65535 || port == 0)
				goto bad;
		} else {
			port = CEPH_MON_PORT;
		}

		addr_set_port(ss, port);

		dout("parse_ips got %s\n", pr_addr(ss));

		if (p == end)
			break;
		if (*p != ',')
			goto bad;
		p++;
	}

	if (p != end)
		goto bad;

	if (count)
		*count = i + 1;
	return 0;

bad:
	pr_err("parse_ips bad ip '%.*s'\n", (int)(end - c), c);
	return -EINVAL;
}
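/*
 * An illustrative (hypothetical) caller, shaped like the mount-option
 * parsing that feeds this function: split a "1.2.3.4:6789,[::1]:6789"
 * style monitor list into entity addrs.  The array size and string
 * here are assumptions, not values from this codebase.
 */
#if 0	/* example only, not built */
	struct ceph_entity_addr mon_addr[16];
	int num_mon;
	const char *s = "1.2.3.4:6789,[::1]:6789";

	if (ceph_parse_ips(s, s + strlen(s), mon_addr,
			   ARRAY_SIZE(mon_addr), &num_mon) == 0)
		dout("parsed %d monitor addrs\n", num_mon);
#endif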
static int process_banner(struct ceph_connection *con)
{
	dout("process_banner on %p\n", con);

	if (verify_hello(con) < 0)
		return -1;

	ceph_decode_addr(&con->actual_peer_addr);
	ceph_decode_addr(&con->peer_addr_for_me);

	/*
	 * Make sure the other end is who we wanted.  note that the other
	 * end may not yet know their ip address, so if it's 0.0.0.0, give
	 * them the benefit of the doubt.
	 */
	if (memcmp(&con->peer_addr, &con->actual_peer_addr,
		   sizeof(con->peer_addr)) != 0 &&
	    !(addr_is_blank(&con->actual_peer_addr.in_addr) &&
	      con->actual_peer_addr.nonce == con->peer_addr.nonce)) {
		pr_warning("wrong peer, want %s/%d, got %s/%d\n",
			   pr_addr(&con->peer_addr.in_addr),
			   (int)le32_to_cpu(con->peer_addr.nonce),
			   pr_addr(&con->actual_peer_addr.in_addr),
			   (int)le32_to_cpu(con->actual_peer_addr.nonce));
		con->error_msg = "wrong peer at address";
		return -1;
	}

	/*
	 * did we learn our address?
	 */
	if (addr_is_blank(&con->msgr->inst.addr.in_addr)) {
		int port = addr_port(&con->msgr->inst.addr.in_addr);

		memcpy(&con->msgr->inst.addr.in_addr,
		       &con->peer_addr_for_me.in_addr,
		       sizeof(con->peer_addr_for_me.in_addr));
		addr_set_port(&con->msgr->inst.addr.in_addr, port);
		encode_my_addr(con->msgr);
		dout("process_banner learned my addr is %s\n",
		     pr_addr(&con->msgr->inst.addr.in_addr));
	}

	set_bit(NEGOTIATING, &con->state);
	prepare_read_connect(con);
	return 0;
}
static void fail_protocol(struct ceph_connection *con)
{
	reset_connection(con);
	set_bit(CLOSED, &con->state);  /* in case there's queued work */

	mutex_unlock(&con->mutex);
	if (con->ops->bad_proto)
		con->ops->bad_proto(con);
	mutex_lock(&con->mutex);
}
static int process_connect(struct ceph_connection *con)
{
	u64 sup_feat = CEPH_FEATURE_SUPPORTED;
	u64 req_feat = CEPH_FEATURE_REQUIRED;
	u64 server_feat = le64_to_cpu(con->in_reply.features);

	dout("process_connect on %p tag %d\n", con, (int)con->in_tag);

	switch (con->in_reply.tag) {
	case CEPH_MSGR_TAG_FEATURES:
		pr_err("%s%lld %s feature set mismatch,"
		       " my %llx < server's %llx, missing %llx\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       sup_feat, server_feat, server_feat & ~sup_feat);
		con->error_msg = "missing required protocol features";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADPROTOVER:
		pr_err("%s%lld %s protocol version mismatch,"
		       " my %d != server's %d\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr),
		       le32_to_cpu(con->out_connect.protocol_version),
		       le32_to_cpu(con->in_reply.protocol_version));
		con->error_msg = "protocol version mismatch";
		fail_protocol(con);
		return -1;

	case CEPH_MSGR_TAG_BADAUTHORIZER:
		con->auth_retry++;
		dout("process_connect %p got BADAUTHORIZER attempt %d\n", con,
		     con->auth_retry);
		if (con->auth_retry == 2) {
			con->error_msg = "connect authorization failure";
			reset_connection(con);
			set_bit(CLOSED, &con->state);
			return -1;
		}
		con->auth_retry = 1;
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RESETSESSION:
		/*
		 * If we connected with a large connect_seq but the peer
		 * has no record of a session with us (no connection, or
		 * connect_seq == 0), they will send RESETSESSION to indicate
		 * that they must have reset their session, and may have
		 * dropped messages.
		 */
		dout("process_connect got RESET peer seq %u\n",
		     le32_to_cpu(con->in_connect.connect_seq));
		pr_err("%s%lld %s connection reset\n",
		       ENTITY_NAME(con->peer_name),
		       pr_addr(&con->peer_addr.in_addr));
		reset_connection(con);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);

		/* Tell ceph about it. */
		mutex_unlock(&con->mutex);
		pr_info("reset on %s%lld\n", ENTITY_NAME(con->peer_name));
		if (con->ops->peer_reset)
			con->ops->peer_reset(con);
		mutex_lock(&con->mutex);
		break;

	case CEPH_MSGR_TAG_RETRY_SESSION:
		/*
		 * If we sent a smaller connect_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY my seq = %u, peer_seq = %u\n",
		     le32_to_cpu(con->out_connect.connect_seq),
		     le32_to_cpu(con->in_connect.connect_seq));
		con->connect_seq = le32_to_cpu(con->in_connect.connect_seq);
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_RETRY_GLOBAL:
		/*
		 * If we sent a smaller global_seq than the peer has, try
		 * again with a larger value.
		 */
		dout("process_connect got RETRY_GLOBAL my %u peer_gseq %u\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_connect.global_seq));
		get_global_seq(con->msgr,
			       le32_to_cpu(con->in_connect.global_seq));
		prepare_write_connect(con->msgr, con, 0);
		prepare_read_connect(con);
		break;

	case CEPH_MSGR_TAG_READY:
		if (req_feat & ~server_feat) {
			pr_err("%s%lld %s protocol feature mismatch,"
			       " my required %llx > server's %llx, need %llx\n",
			       ENTITY_NAME(con->peer_name),
			       pr_addr(&con->peer_addr.in_addr),
			       req_feat, server_feat, req_feat & ~server_feat);
			con->error_msg = "missing required protocol features";
			fail_protocol(con);
			return -1;
		}
		clear_bit(CONNECTING, &con->state);
		con->peer_global_seq = le32_to_cpu(con->in_reply.global_seq);
		con->connect_seq++;
		con->peer_features = server_feat;
		dout("process_connect got READY gseq %d cseq %d (%d)\n",
		     con->peer_global_seq,
		     le32_to_cpu(con->in_reply.connect_seq),
		     con->connect_seq);
		WARN_ON(con->connect_seq !=
			le32_to_cpu(con->in_reply.connect_seq));

		if (con->in_reply.flags & CEPH_MSG_CONNECT_LOSSY)
			set_bit(LOSSYTX, &con->state);

		prepare_read_tag(con);
		break;

	case CEPH_MSGR_TAG_WAIT:
		/*
		 * If there is a connection race (we are opening
		 * connections to each other), one of us may just have
		 * to WAIT.  This shouldn't happen if we are the
		 * client.
		 */
		pr_err("process_connect peer connecting WAIT\n");
		/* fall through */

	default:
		pr_err("connect protocol error, will retry\n");
		con->error_msg = "protocol error, garbage tag during connect";
		return -1;
	}
	return 0;
}
/*
 * read (part of) an ack
 */
static int read_partial_ack(struct ceph_connection *con)
{
	int to = 0;

	return read_partial(con, &to, sizeof(con->in_temp_ack),
			    &con->in_temp_ack);
}

/*
 * We can finally discard anything that's been acked.
 */
static void process_ack(struct ceph_connection *con)
{
	struct ceph_msg *m;
	u64 ack = le64_to_cpu(con->in_temp_ack);
	u64 seq;

	while (!list_empty(&con->out_sent)) {
		m = list_first_entry(&con->out_sent, struct ceph_msg,
				     list_head);
		seq = le64_to_cpu(m->hdr.seq);
		if (seq > ack)
			break;
		dout("got ack for seq %llu type %d at %p\n", seq,
		     le16_to_cpu(m->hdr.type), m);
		ceph_msg_remove(m);
	}
	prepare_read_tag(con);
}
static int read_partial_message_section(struct ceph_connection *con,
					struct kvec *section,
					unsigned int sec_len, u32 *crc)
{
	int left;
	int ret;

	BUG_ON(!section);

	while (section->iov_len < sec_len) {
		BUG_ON(section->iov_base == NULL);
		left = sec_len - section->iov_len;
		ret = ceph_tcp_recvmsg(con->sock, (char *)section->iov_base +
				       section->iov_len, left);
		if (ret <= 0)
			return ret;
		section->iov_len += ret;
		if (section->iov_len == sec_len)
			*crc = crc32c(0, section->iov_base,
				      section->iov_len);
	}

	return 1;
}
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip);
static int read_partial_message_pages(struct ceph_connection *con,
				      struct page **pages,
				      unsigned data_len, int datacrc)
{
	void *p;
	int ret;
	int left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(PAGE_SIZE - con->in_msg_pos.page_pos));
	/* (page) data */
	BUG_ON(pages == NULL);
	p = kmap(pages[con->in_msg_pos.page]);
	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(pages[con->in_msg_pos.page]);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == PAGE_SIZE) {
		con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.page++;
	}

	return ret;
}
static int read_partial_message_bio(struct ceph_connection *con,
				    struct bio **bio_iter, int *bio_seg,
				    unsigned data_len, int datacrc)
{
	struct bio_vec *bv = bio_iovec_idx(*bio_iter, *bio_seg);
	void *p;
	int ret, left;

	left = min((int)(data_len - con->in_msg_pos.data_pos),
		   (int)(bv->bv_len - con->in_msg_pos.page_pos));

	p = kmap(bv->bv_page) + bv->bv_offset;

	ret = ceph_tcp_recvmsg(con->sock, p + con->in_msg_pos.page_pos,
			       left);
	if (ret > 0 && datacrc)
		con->in_data_crc =
			crc32c(con->in_data_crc,
			       p + con->in_msg_pos.page_pos, ret);
	kunmap(bv->bv_page);
	if (ret <= 0)
		return ret;
	con->in_msg_pos.data_pos += ret;
	con->in_msg_pos.page_pos += ret;
	if (con->in_msg_pos.page_pos == bv->bv_len) {
		con->in_msg_pos.page_pos = 0;
		iter_bio_next(bio_iter, bio_seg);
	}

	return ret;
}
/*
 * read (part of) a message.
 */
static int read_partial_message(struct ceph_connection *con)
{
	struct ceph_msg *m = con->in_msg;
	int ret;
	int to, left;
	unsigned front_len, middle_len, data_len, data_off;
	int datacrc = !con->msgr->nocrc;	/* check data crc unless disabled */
	int skip;
	u64 seq;

	dout("read_partial_message con %p msg %p\n", con, m);

	/* header */
	while (con->in_base_pos < sizeof(con->in_hdr)) {
		left = sizeof(con->in_hdr) - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock,
				       (char *)&con->in_hdr + con->in_base_pos,
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
		if (con->in_base_pos == sizeof(con->in_hdr)) {
			u32 crc = crc32c(0, (void *)&con->in_hdr,
				 sizeof(con->in_hdr) - sizeof(con->in_hdr.crc));
			if (crc != le32_to_cpu(con->in_hdr.crc)) {
				pr_err("read_partial_message bad hdr "
				       " crc %u != expected %u\n",
				       crc, le32_to_cpu(con->in_hdr.crc));
				return -EBADMSG;
			}
		}
	}
	front_len = le32_to_cpu(con->in_hdr.front_len);
	if (front_len > CEPH_MSG_MAX_FRONT_LEN)
		return -EIO;
	middle_len = le32_to_cpu(con->in_hdr.middle_len);
	if (middle_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_len = le32_to_cpu(con->in_hdr.data_len);
	if (data_len > CEPH_MSG_MAX_DATA_LEN)
		return -EIO;
	data_off = le16_to_cpu(con->in_hdr.data_off);

	/* verify seq# */
	seq = le64_to_cpu(con->in_hdr.seq);
	if ((s64)seq - (s64)con->in_seq < 1) {
		pr_info("skipping %s%lld %s seq %lld, expected %lld\n",
			ENTITY_NAME(con->peer_name),
			pr_addr(&con->peer_addr.in_addr),
			seq, con->in_seq + 1);
		con->in_base_pos = -front_len - middle_len - data_len -
			sizeof(m->footer);
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
		return 0;
	} else if ((s64)seq - (s64)con->in_seq > 1) {
		pr_err("read_partial_message bad seq %lld expected %lld\n",
		       seq, con->in_seq + 1);
		con->error_msg = "bad message sequence # for incoming message";
		return -EBADMSG;
	}

	/* allocate message? */
	if (!con->in_msg) {
		dout("got hdr type %d front %d data %d\n", con->in_hdr.type,
		     con->in_hdr.front_len, con->in_hdr.data_len);
		skip = 0;
		con->in_msg = ceph_alloc_msg(con, &con->in_hdr, &skip);
		if (skip) {
			/* skip this message */
			dout("alloc_msg said skip message\n");
			BUG_ON(con->in_msg);
			con->in_base_pos = -front_len - middle_len - data_len -
				sizeof(m->footer);
			con->in_tag = CEPH_MSGR_TAG_READY;
			con->in_seq++;
			return 0;
		}
		if (!con->in_msg) {
			con->error_msg =
				"error allocating memory for incoming message";
			return -ENOMEM;
		}
		m = con->in_msg;
		m->front.iov_len = 0;    /* haven't read it yet */
		if (m->middle)
			m->middle->vec.iov_len = 0;

		con->in_msg_pos.page = 0;
		if (m->pages)
			con->in_msg_pos.page_pos = data_off & ~PAGE_MASK;
		else
			con->in_msg_pos.page_pos = 0;
		con->in_msg_pos.data_pos = 0;
	}

	/* front */
	ret = read_partial_message_section(con, &m->front, front_len,
					   &con->in_front_crc);
	if (ret <= 0)
		return ret;

	/* middle */
	if (m->middle) {
		ret = read_partial_message_section(con, &m->middle->vec,
						   middle_len,
						   &con->in_middle_crc);
		if (ret <= 0)
			return ret;
	}

	if (m->bio && !m->bio_iter)
		init_bio_iter(m->bio, &m->bio_iter, &m->bio_seg);

	/* (page) data */
	while (con->in_msg_pos.data_pos < data_len) {
		if (m->pages) {
			ret = read_partial_message_pages(con, m->pages,
							 data_len, datacrc);
			if (ret <= 0)
				return ret;
		} else if (m->bio) {
			ret = read_partial_message_bio(con,
						       &m->bio_iter,
						       &m->bio_seg,
						       data_len, datacrc);
			if (ret <= 0)
				return ret;
		} else {
			BUG_ON(1);
		}
	}

	/* footer */
	to = sizeof(m->hdr) + sizeof(m->footer);
	while (con->in_base_pos < to) {
		left = to - con->in_base_pos;
		ret = ceph_tcp_recvmsg(con->sock, (char *)&m->footer +
				       (con->in_base_pos - sizeof(m->hdr)),
				       left);
		if (ret <= 0)
			return ret;
		con->in_base_pos += ret;
	}
	dout("read_partial_message got msg %p %d (%u) + %d (%u) + %d (%u)\n",
	     m, front_len, m->footer.front_crc, middle_len,
	     m->footer.middle_crc, data_len, m->footer.data_crc);

	/* crc ok? */
	if (con->in_front_crc != le32_to_cpu(m->footer.front_crc)) {
		pr_err("read_partial_message %p front crc %u != exp. %u\n",
		       m, con->in_front_crc,
		       le32_to_cpu(m->footer.front_crc));
		return -EBADMSG;
	}
	if (con->in_middle_crc != le32_to_cpu(m->footer.middle_crc)) {
		pr_err("read_partial_message %p middle crc %u != exp %u\n",
		       m, con->in_middle_crc,
		       le32_to_cpu(m->footer.middle_crc));
		return -EBADMSG;
	}
	if (datacrc &&
	    (m->footer.flags & CEPH_MSG_FOOTER_NOCRC) == 0 &&
	    con->in_data_crc != le32_to_cpu(m->footer.data_crc)) {
		pr_err("read_partial_message %p data crc %u != exp. %u\n", m,
		       con->in_data_crc, le32_to_cpu(m->footer.data_crc));
		return -EBADMSG;
	}

	return 1;  /* done! */
}
/*
 * Process message.  This happens in the worker thread.  The callback should
 * be careful not to do anything that waits on other incoming messages or it
 * may deadlock.
 */
static void process_message(struct ceph_connection *con)
{
	struct ceph_msg *msg;

	msg = con->in_msg;
	con->in_msg = NULL;

	/* if first message, set peer_name */
	if (con->peer_name.type == 0)
		con->peer_name = msg->hdr.src;

	con->in_seq++;
	mutex_unlock(&con->mutex);

	dout("===== %p %llu from %s%lld %d=%s len %d+%d (%u %u %u) =====\n",
	     msg, le64_to_cpu(msg->hdr.seq),
	     ENTITY_NAME(msg->hdr.src),
	     le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.data_len),
	     con->in_front_crc, con->in_middle_crc, con->in_data_crc);
	con->ops->dispatch(con, msg);

	mutex_lock(&con->mutex);
	prepare_read_tag(con);
}
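/*
 * A sketch of the constraint described above: a dispatch callback that
 * must not block on further messages hands long-running work to its
 * own workqueue and returns promptly.  All names here (example_wq,
 * example_work, example_dispatch) are hypothetical, not part of this
 * messenger.
 */
#if 0	/* example only, not built */
static struct workqueue_struct *example_wq;	/* assumed, set up at init */

struct example_work {
	struct work_struct work;
	struct ceph_msg *msg;
};

static void example_do_slow_thing(struct work_struct *work)
{
	struct example_work *w = container_of(work, struct example_work, work);

	/* ... long-running processing of w->msg, outside dispatch ... */
	ceph_msg_put(w->msg);
	kfree(w);
}

static void example_dispatch(struct ceph_connection *con,
			     struct ceph_msg *msg)
{
	struct example_work *w = kmalloc(sizeof(*w), GFP_NOFS);

	if (!w) {
		ceph_msg_put(msg);
		return;
	}
	w->msg = msg;				/* hand off our ref */
	INIT_WORK(&w->work, example_do_slow_thing);
	queue_work(example_wq, &w->work);	/* never block in dispatch */
}
#endif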
/*
 * Write something to the socket.  Called in a worker thread when the
 * socket appears to be writeable and we have something ready to send.
 */
static int try_write(struct ceph_connection *con)
{
	struct ceph_messenger *msgr = con->msgr;
	int ret = 1;

	dout("try_write start %p state %lu nref %d\n", con, con->state,
	     atomic_read(&con->nref));

more:
	dout("try_write out_kvec_bytes %d\n", con->out_kvec_bytes);

	/* open the socket first? */
	if (con->sock == NULL) {
		/*
		 * if we were STANDBY and are reconnecting _this_
		 * connection, bump connect_seq now.  Always bump
		 * global_seq.
		 */
		if (test_and_clear_bit(STANDBY, &con->state))
			con->connect_seq++;

		prepare_write_banner(msgr, con);
		prepare_write_connect(msgr, con, 1);
		prepare_read_banner(con);
		set_bit(CONNECTING, &con->state);
		clear_bit(NEGOTIATING, &con->state);

		BUG_ON(con->in_msg);
		con->in_tag = CEPH_MSGR_TAG_READY;
		dout("try_write initiating connect on %p new state %lu\n",
		     con, con->state);
		con->sock = ceph_tcp_connect(con);
		if (IS_ERR(con->sock)) {
			con->sock = NULL;
			con->error_msg = "connect error";
			ret = -1;
			goto out;
		}
	}

more_kvec:
	/* kvec data queued? */
	if (con->out_skip) {
		ret = write_partial_skip(con);
		if (ret == 0)
			goto out;
		if (ret < 0) {
			dout("try_write write_partial_skip err %d\n", ret);
			goto done;
		}
	}
	if (con->out_kvec_left) {
		ret = write_partial_kvec(con);
		if (ret <= 0)
			goto done;
	}

	/* msg pages? */
	if (con->out_msg) {
		if (con->out_msg_done) {
			ceph_msg_put(con->out_msg);
			con->out_msg = NULL;   /* we're done with this one */
			goto do_next;
		}

		ret = write_partial_msg_pages(con);
		if (ret == 1)
			goto more_kvec;  /* we need to send the footer, too! */
		if (ret == 0)
			goto done;
		if (ret < 0) {
			dout("try_write write_partial_msg_pages err %d\n",
			     ret);
			goto done;
		}
	}

do_next:
	if (!test_bit(CONNECTING, &con->state)) {
		/* is anything else pending? */
		if (!list_empty(&con->out_queue)) {
			prepare_write_message(con);
			goto more;
		}
		if (con->in_seq > con->in_seq_acked) {
			prepare_write_ack(con);
			goto more;
		}
		if (test_and_clear_bit(KEEPALIVE_PENDING, &con->state)) {
			prepare_write_keepalive(con);
			goto more;
		}
	}

	/* Nothing to do! */
	clear_bit(WRITE_PENDING, &con->state);
	dout("try_write nothing else to write.\n");
done:
	ret = 0;
out:
	dout("try_write done on %p\n", con);
	return ret;
}
/*
 * Read what we can from the socket.
 */
static int try_read(struct ceph_connection *con)
{
	int ret = -1;

	if (!con->sock)
		return 0;

	if (test_bit(STANDBY, &con->state))
		return 0;

	dout("try_read start on %p\n", con);

more:
	dout("try_read tag %d in_base_pos %d\n", (int)con->in_tag,
	     con->in_base_pos);
	if (test_bit(CONNECTING, &con->state)) {
		if (!test_bit(NEGOTIATING, &con->state)) {
			dout("try_read connecting\n");
			ret = read_partial_banner(con);
			if (ret <= 0)
				goto done;
			if (process_banner(con) < 0) {
				ret = -1;
				goto out;
			}
		}
		ret = read_partial_connect(con);
		if (ret <= 0)
			goto done;
		if (process_connect(con) < 0) {
			ret = -1;
			goto out;
		}
		goto more;
	}

	if (con->in_base_pos < 0) {
		/*
		 * skipping + discarding content.
		 *
		 * FIXME: there must be a better way to do this!
		 */
		static char buf[1024];
		int skip = min(1024, -con->in_base_pos);

		dout("skipping %d / %d bytes\n", skip, -con->in_base_pos);
		ret = ceph_tcp_recvmsg(con->sock, buf, skip);
		if (ret <= 0)
			goto done;
		con->in_base_pos += ret;
		if (con->in_base_pos)
			goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_READY) {
		/*
		 * what's next?
		 */
		ret = ceph_tcp_recvmsg(con->sock, &con->in_tag, 1);
		if (ret <= 0)
			goto done;
		dout("try_read got tag %d\n", (int)con->in_tag);
		switch (con->in_tag) {
		case CEPH_MSGR_TAG_MSG:
			prepare_read_message(con);
			break;
		case CEPH_MSGR_TAG_ACK:
			prepare_read_ack(con);
			break;
		case CEPH_MSGR_TAG_CLOSE:
			set_bit(CLOSED, &con->state);   /* fixme */
			goto done;
		default:
			goto bad_tag;
		}
	}
	if (con->in_tag == CEPH_MSGR_TAG_MSG) {
		ret = read_partial_message(con);
		if (ret <= 0) {
			switch (ret) {
			case -EBADMSG:
				con->error_msg = "bad crc";
				ret = -EIO;
				goto out;
			case -EIO:
				con->error_msg = "io error";
				goto out;
			default:
				goto done;
			}
		}
		if (con->in_tag == CEPH_MSGR_TAG_READY)
			goto more;
		process_message(con);
		goto more;
	}
	if (con->in_tag == CEPH_MSGR_TAG_ACK) {
		ret = read_partial_ack(con);
		if (ret <= 0)
			goto done;
		process_ack(con);
		goto more;
	}

done:
	ret = 0;
out:
	dout("try_read done on %p\n", con);
	return ret;

bad_tag:
	pr_err("try_read bad con->in_tag = %d\n", (int)con->in_tag);
	con->error_msg = "protocol error, garbage tag";
	ret = -1;
	goto out;
}
/*
 * Atomically queue work on a connection.  Bump @con reference to
 * avoid races with connection teardown.
 *
 * There is some trickery going on with QUEUED and BUSY because we
 * only want a _single_ thread operating on each connection at any
 * point in time, but we want to use all available CPUs.
 *
 * The worker thread only proceeds if it can atomically set BUSY.  It
 * clears QUEUED and does its thing.  When it thinks it's done, it
 * clears BUSY, then rechecks QUEUED.. if it's set again, it loops
 * (tries again to set BUSY).
 *
 * To queue work, we first set QUEUED, _then_ if BUSY isn't set, we
 * try to queue work.  If that fails (work is already queued, or BUSY)
 * we give up (work also already being done or is queued) but leave QUEUED
 * set so that the worker thread will loop if necessary.
 */
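/*
 * A standalone userspace sketch of the QUEUED/BUSY protocol described
 * above, using C11 atomics in place of the kernel's test_bit helpers.
 * The struct, function names, and the "run"/"kick" hooks are
 * illustrative assumptions, not this file's implementation.
 */
#if 0	/* example only, not built */
#include <stdatomic.h>
#include <stdbool.h>

struct runner {
	atomic_bool queued;
	atomic_bool busy;
};

/* the worker: proceed only if we can atomically claim BUSY */
static void worker(struct runner *r, void (*run)(void))
{
	if (atomic_exchange(&r->busy, true))
		return;				/* another thread holds BUSY */
	for (;;) {
		atomic_store(&r->queued, false);
		run();				/* one pass of work */
		atomic_store(&r->busy, false);
		/* recheck QUEUED; if set again, try to retake BUSY */
		if (!atomic_load(&r->queued))
			break;
		if (atomic_exchange(&r->busy, true))
			break;			/* someone else took over */
	}
}

/* to queue: set QUEUED first, then kick a worker only if not BUSY */
static void enqueue(struct runner *r, void (*kick)(void))
{
	atomic_store(&r->queued, true);
	if (!atomic_load(&r->busy))
		kick();		/* e.g. submit worker() to a thread pool */
}
#endif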
static void queue_con(struct ceph_connection *con)
{
	if (test_bit(DEAD, &con->state)) {
		dout("queue_con %p ignoring: DEAD\n",
		     con);
		return;
	}

	if (!con->ops->get(con)) {
		dout("queue_con %p ref count 0\n", con);
		return;
	}

	set_bit(QUEUED, &con->state);
	if (test_bit(BUSY, &con->state)) {
		dout("queue_con %p - already BUSY\n", con);
		con->ops->put(con);
	} else if (!queue_work(ceph_msgr_wq, &con->work.work)) {
		dout("queue_con %p - already queued\n", con);
		con->ops->put(con);
	} else {
		dout("queue_con %p\n", con);
	}
}
/*
 * Do some work on a connection.  Drop a connection ref when we're done.
 */
static void con_work(struct work_struct *work)
{
	struct ceph_connection *con = container_of(work, struct ceph_connection,
						   work.work);
	int backoff = 0;

more:
	if (test_and_set_bit(BUSY, &con->state) != 0) {
		dout("con_work %p BUSY already set\n", con);
		goto out;
	}
	dout("con_work %p start, clearing QUEUED\n", con);
	clear_bit(QUEUED, &con->state);

	mutex_lock(&con->mutex);

	if (test_bit(CLOSED, &con->state)) { /* e.g. if we are replaced */
		dout("con_work CLOSED\n");
		con_close_socket(con);
		goto done;
	}
	if (test_and_clear_bit(OPENING, &con->state)) {
		/* reopen w/ new peer */
		dout("con_work OPENING\n");
		con_close_socket(con);
	}

	if (test_and_clear_bit(SOCK_CLOSED, &con->state) ||
	    try_read(con) < 0 ||
	    try_write(con) < 0) {
		mutex_unlock(&con->mutex);
		backoff = 1;
		ceph_fault(con);     /* error/fault path */
		goto done_unlocked;
	}

done:
	mutex_unlock(&con->mutex);

done_unlocked:
	clear_bit(BUSY, &con->state);
	dout("con->state=%lu\n", con->state);
	if (test_bit(QUEUED, &con->state)) {
		if (!backoff || test_bit(OPENING, &con->state)) {
			dout("con_work %p QUEUED reset, looping\n", con);
			goto more;
		}
		dout("con_work %p QUEUED reset, but just faulted\n", con);
		clear_bit(QUEUED, &con->state);
	}
	dout("con_work %p done\n", con);

out:
	con->ops->put(con);
}
/*
 * Generic error/fault handler.  A retry mechanism is used with
 * exponential backoff.
 */
static void ceph_fault(struct ceph_connection *con)
{
	pr_err("%s%lld %s %s\n", ENTITY_NAME(con->peer_name),
	       pr_addr(&con->peer_addr.in_addr), con->error_msg);
	dout("fault %p state %lu to peer %s\n",
	     con, con->state, pr_addr(&con->peer_addr.in_addr));

	if (test_bit(LOSSYTX, &con->state)) {
		dout("fault on LOSSYTX channel\n");
		goto out;
	}

	mutex_lock(&con->mutex);
	if (test_bit(CLOSED, &con->state))
		goto out_unlock;

	con_close_socket(con);

	if (con->in_msg) {
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
	}

	/* Requeue anything that hasn't been acked */
	list_splice_init(&con->out_sent, &con->out_queue);

	/* If there are no messages in the queue, place the connection
	 * in a STANDBY state (i.e., don't try to reconnect just yet). */
	if (list_empty(&con->out_queue) && !con->out_keepalive_pending) {
		dout("fault setting STANDBY\n");
		set_bit(STANDBY, &con->state);
	} else {
		/* retry after a delay. */
		if (con->delay == 0)
			con->delay = BASE_DELAY_INTERVAL;
		else if (con->delay < MAX_DELAY_INTERVAL)
			con->delay *= 2;
		dout("fault queueing %p delay %lu\n", con, con->delay);
		con->ops->get(con);
		if (queue_delayed_work(ceph_msgr_wq, &con->work,
				       round_jiffies_relative(con->delay)) == 0)
			con->ops->put(con);
	}

out_unlock:
	mutex_unlock(&con->mutex);
out:
	/*
	 * in case we faulted due to authentication, invalidate our
	 * current tickets so that we can get new ones.
	 */
	if (con->auth_retry && con->ops->invalidate_authorizer) {
		dout("calling invalidate_authorizer()\n");
		con->ops->invalidate_authorizer(con);
	}

	if (con->ops->fault)
		con->ops->fault(con);
}
/*
 * create a new messenger instance
 */
struct ceph_messenger *ceph_messenger_create(struct ceph_entity_addr *myaddr)
{
	struct ceph_messenger *msgr;

	msgr = kzalloc(sizeof(*msgr), GFP_KERNEL);
	if (msgr == NULL)
		return ERR_PTR(-ENOMEM);

	spin_lock_init(&msgr->global_seq_lock);

	/* the zero page is needed if a request is "canceled" while the message
	 * is being written over the socket */
	msgr->zero_page = __page_cache_alloc(GFP_KERNEL | __GFP_ZERO);
	if (!msgr->zero_page) {
		kfree(msgr);
		return ERR_PTR(-ENOMEM);
	}
	kmap(msgr->zero_page);

	if (myaddr)
		msgr->inst.addr = *myaddr;

	/* select a random nonce */
	msgr->inst.addr.type = 0;
	get_random_bytes(&msgr->inst.addr.nonce, sizeof(msgr->inst.addr.nonce));
	encode_my_addr(msgr);

	dout("messenger_create %p\n", msgr);
	return msgr;
}
void ceph_messenger_destroy(struct ceph_messenger *msgr)
{
	dout("destroy %p\n", msgr);
	kunmap(msgr->zero_page);
	__free_page(msgr->zero_page);
	kfree(msgr);
	dout("destroyed messenger %p\n", msgr);
}
/*
 * Queue up an outgoing message on the given connection.
 */
void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg)
{
	if (test_bit(CLOSED, &con->state)) {
		dout("con_send %p closed, dropping %p\n", con, msg);
		ceph_msg_put(msg);
		return;
	}

	/* set src+dst */
	msg->hdr.src = con->msgr->inst.name;

	BUG_ON(msg->front.iov_len != le32_to_cpu(msg->hdr.front_len));

	msg->needs_out_seq = true;

	/* queue */
	mutex_lock(&con->mutex);
	BUG_ON(!list_empty(&msg->list_head));
	list_add_tail(&msg->list_head, &con->out_queue);
	dout("----- %p to %s%lld %d=%s len %d+%d+%d -----\n", msg,
	     ENTITY_NAME(con->peer_name), le16_to_cpu(msg->hdr.type),
	     ceph_msg_type_name(le16_to_cpu(msg->hdr.type)),
	     le32_to_cpu(msg->hdr.front_len),
	     le32_to_cpu(msg->hdr.middle_len),
	     le32_to_cpu(msg->hdr.data_len));
	mutex_unlock(&con->mutex);

	/* if there wasn't anything waiting to send before, queue
	 * new work */
	if (test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * Revoke a message that was previously queued for send
 */
void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (!list_empty(&msg->list_head)) {
		dout("con_revoke %p msg %p - was on queue\n", con, msg);
		list_del_init(&msg->list_head);
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	if (con->out_msg == msg) {
		dout("con_revoke %p msg %p - was sending\n", con, msg);
		con->out_msg = NULL;
		if (con->out_kvec_is_msg) {
			con->out_skip = con->out_kvec_bytes;
			con->out_kvec_is_msg = false;
		}
		ceph_msg_put(msg);
		msg->hdr.seq = 0;
	}
	mutex_unlock(&con->mutex);
}
/*
 * Revoke a message that we may be reading data into
 */
void ceph_con_revoke_message(struct ceph_connection *con, struct ceph_msg *msg)
{
	mutex_lock(&con->mutex);
	if (con->in_msg && con->in_msg == msg) {
		unsigned front_len = le32_to_cpu(con->in_hdr.front_len);
		unsigned middle_len = le32_to_cpu(con->in_hdr.middle_len);
		unsigned data_len = le32_to_cpu(con->in_hdr.data_len);

		/* skip rest of message */
		dout("con_revoke_pages %p msg %p revoked\n", con, msg);
		con->in_base_pos = con->in_base_pos -
				sizeof(struct ceph_msg_header) -
				front_len -
				middle_len -
				data_len -
				sizeof(struct ceph_msg_footer);
		ceph_msg_put(con->in_msg);
		con->in_msg = NULL;
		con->in_tag = CEPH_MSGR_TAG_READY;
		con->in_seq++;
	} else {
		dout("con_revoke_pages %p msg %p pages %p no-op\n",
		     con, con->in_msg, msg);
	}
	mutex_unlock(&con->mutex);
}
/*
 * Queue a keepalive byte to ensure the tcp connection is alive.
 */
void ceph_con_keepalive(struct ceph_connection *con)
{
	if (test_and_set_bit(KEEPALIVE_PENDING, &con->state) == 0 &&
	    test_and_set_bit(WRITE_PENDING, &con->state) == 0)
		queue_con(con);
}
/*
 * construct a new message with given type, size
 * the new msg has a ref count of 1.
 */
struct ceph_msg *ceph_msg_new(int type, int front_len, gfp_t flags)
{
	struct ceph_msg *m;

	m = kmalloc(sizeof(*m), flags);
	if (m == NULL)
		goto out;
	kref_init(&m->kref);
	INIT_LIST_HEAD(&m->list_head);

	m->hdr.tid = 0;
	m->hdr.type = cpu_to_le16(type);
	m->hdr.priority = cpu_to_le16(CEPH_MSG_PRIO_DEFAULT);
	m->hdr.version = 0;
	m->hdr.front_len = cpu_to_le32(front_len);
	m->hdr.middle_len = 0;
	m->hdr.data_len = 0;
	m->hdr.data_off = 0;
	m->hdr.reserved = 0;
	m->footer.front_crc = 0;
	m->footer.middle_crc = 0;
	m->footer.data_crc = 0;
	m->footer.flags = 0;
	m->front_max = front_len;
	m->front_is_vmalloc = false;
	m->more_to_follow = false;
	m->pool = NULL;

	/* front */
	if (front_len) {
		if (front_len > PAGE_CACHE_SIZE) {
			m->front.iov_base = __vmalloc(front_len, flags,
						      PAGE_KERNEL);
			m->front_is_vmalloc = true;
		} else {
			m->front.iov_base = kmalloc(front_len, flags);
		}
		if (m->front.iov_base == NULL) {
			pr_err("msg_new can't allocate %d bytes\n",
			       front_len);
			goto out2;
		}
	} else {
		m->front.iov_base = NULL;
	}
	m->front.iov_len = front_len;

	/* middle */
	m->middle = NULL;

	/* data */
	m->nr_pages = 0;
	m->pages = NULL;
	m->pagelist = NULL;
	m->bio = NULL;
	m->bio_iter = NULL;
	m->bio_seg = 0;
	m->trail = NULL;

	dout("ceph_msg_new %p front %d\n", m, front_len);
	return m;

out2:
	ceph_msg_put(m);
out:
	pr_err("msg_new can't create type %d front %d\n", type, front_len);
	return NULL;
}
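/*
 * Typical use, as a sketch: allocate a message for a fixed-size
 * request, fill the front payload, and queue it on a connection.  The
 * request struct and message type constant here are hypothetical.
 */
#if 0	/* example only, not built */
	struct ceph_msg *msg;

	msg = ceph_msg_new(EXAMPLE_MSG_TYPE, sizeof(struct example_req),
			   GFP_NOFS);
	if (msg) {
		memset(msg->front.iov_base, 0, msg->front.iov_len);
		ceph_con_send(con, msg);   /* the queue consumes our ref */
	}
#endif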
/*
 * Allocate "middle" portion of a message, if it is needed and wasn't
 * allocated by alloc_msg.  This allows us to read a small fixed-size
 * per-type header in the front and then gracefully fail (i.e.,
 * propagate the error to the caller based on info in the front) when
 * the middle is too large.
 */
static int ceph_alloc_middle(struct ceph_connection *con, struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);
	int middle_len = le32_to_cpu(msg->hdr.middle_len);

	dout("alloc_middle %p type %d %s middle_len %d\n", msg, type,
	     ceph_msg_type_name(type), middle_len);
	BUG_ON(!middle_len);
	BUG_ON(msg->middle);

	msg->middle = ceph_buffer_new(middle_len, GFP_NOFS);
	if (!msg->middle)
		return -ENOMEM;
	return 0;
}
/*
 * Generic message allocator, for incoming messages.
 */
static struct ceph_msg *ceph_alloc_msg(struct ceph_connection *con,
				       struct ceph_msg_header *hdr,
				       int *skip)
{
	int type = le16_to_cpu(hdr->type);
	int front_len = le32_to_cpu(hdr->front_len);
	int middle_len = le32_to_cpu(hdr->middle_len);
	struct ceph_msg *msg = NULL;
	int ret;

	if (con->ops->alloc_msg) {
		mutex_unlock(&con->mutex);
		msg = con->ops->alloc_msg(con, hdr, skip);
		mutex_lock(&con->mutex);
		if (!msg || *skip)
			return NULL;
	}
	if (!msg) {
		*skip = 0;
		msg = ceph_msg_new(type, front_len, GFP_NOFS);
		if (!msg) {
			pr_err("unable to allocate msg type %d len %d\n",
			       type, front_len);
			return NULL;
		}
	}
	memcpy(&msg->hdr, &con->in_hdr, sizeof(con->in_hdr));

	if (middle_len && !msg->middle) {
		ret = ceph_alloc_middle(con, msg);
		if (ret < 0) {
			ceph_msg_put(msg);
			return NULL;
		}
	}

	return msg;
}
2372 void ceph_msg_kfree(struct ceph_msg *m)
2374 dout("msg_kfree %p\n", m);
2375 if (m->front_is_vmalloc)
2376 vfree(m->front.iov_base);
2378 kfree(m->front.iov_base);
/*
 * Drop a msg ref.  Destroy as needed.
 */
void ceph_msg_last_put(struct kref *kref)
{
	struct ceph_msg *m = container_of(kref, struct ceph_msg, kref);

	dout("ceph_msg_put last one on %p\n", m);
	WARN_ON(!list_empty(&m->list_head));

	/* drop middle, data, if any */
	if (m->middle) {
		ceph_buffer_put(m->middle);
		m->middle = NULL;
	}
	m->nr_pages = 0;
	m->pages = NULL;

	if (m->pagelist) {
		ceph_pagelist_release(m->pagelist);
		kfree(m->pagelist);
		m->pagelist = NULL;
	}

	m->trail = NULL;

	if (m->pool)
		ceph_msgpool_put(m->pool, m);
	else
		ceph_msg_kfree(m);
}
void ceph_msg_dump(struct ceph_msg *msg)
{
	pr_debug("msg_dump %p (front_max %d nr_pages %d)\n", msg,
		 msg->front_max, msg->nr_pages);
	print_hex_dump(KERN_DEBUG, "header: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->hdr, sizeof(msg->hdr), true);
	print_hex_dump(KERN_DEBUG, " front: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       msg->front.iov_base, msg->front.iov_len, true);
	if (msg->middle)
		print_hex_dump(KERN_DEBUG, "middle: ",
			       DUMP_PREFIX_OFFSET, 16, 1,
			       msg->middle->vec.iov_base,
			       msg->middle->vec.iov_len, true);
	print_hex_dump(KERN_DEBUG, "footer: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       &msg->footer, sizeof(msg->footer), true);
}