block: unify flags for struct bio and struct request
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index bc9ab7fb2cc7d6eaeda21fcfa07706cfe5a4c922..cba1deb7b271161048bab456400e8555f310b030 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
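
Context for the hunks below: before this change, bio flags (BIO_RW_*) were bit
numbers that every caller had to shift, while request flags (REQ_*) were
already masks; the unified REQ_* names are masks in both contexts. A minimal
before/after sketch of the idiom this diff applies throughout (illustrative
snippet, not taken from the patch):

    /* old: BIO_RW_* were bit numbers, shifted at each use */
    bio->bi_rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

    /* new: REQ_* are masks shared between struct bio and struct request */
    bio->bi_rw |= REQ_SYNC | REQ_UNPLUG;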
@@ -42,7 +42,6 @@
 #include <linux/unistd.h>
 #include <linux/vmalloc.h>
 #include <linux/random.h>
-#include <linux/mm.h>
 #include <linux/string.h>
 #include <linux/scatterlist.h>
 #include "drbd_int.h"
@@ -571,6 +570,25 @@ static int drbd_recv(struct drbd_conf *mdev, void *buf, size_t size)
        return rv;
 }
 
+/* quoting tcp(7):
+ *   On individual connections, the socket buffer size must be set prior to the
+ *   listen(2) or connect(2) calls in order to have it take effect.
+ * This is our wrapper to do so.
+ */
+static void drbd_setbufsize(struct socket *sock, unsigned int snd,
+               unsigned int rcv)
+{
+       /* open coded SO_SNDBUF, SO_RCVBUF */
+       if (snd) {
+               sock->sk->sk_sndbuf = snd;
+               sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
+       }
+       if (rcv) {
+               sock->sk->sk_rcvbuf = rcv;
+               sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
+       }
+}
+
 static struct socket *drbd_try_connect(struct drbd_conf *mdev)
 {
        const char *what;
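
The new drbd_setbufsize() above open-codes SO_SNDBUF/SO_RCVBUF on the
in-kernel socket, and the hunks below call it before connect() and listen()
respectively, per the tcp(7) quote; the SOCK_SNDBUF_LOCK/SOCK_RCVBUF_LOCK
bits keep TCP auto-tuning from later overriding the explicit sizes. For
reference, a minimal userspace equivalent of the same tcp(7) rule (a sketch;
the helper name and shape are assumptions, not part of the patch):

    #include <sys/socket.h>

    static int connect_with_bufsize(int fd, const struct sockaddr *addr,
                                    socklen_t alen, int snd, int rcv)
    {
            /* sizes must be set before connect()/listen(): the window
             * scale is negotiated during the TCP handshake */
            if (snd && setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, sizeof(snd)))
                    return -1;
            if (rcv && setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, sizeof(rcv)))
                    return -1;
            return connect(fd, addr, alen); /* only now do the sizes take effect */
    }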
@@ -592,6 +610,8 @@ static struct socket *drbd_try_connect(struct drbd_conf *mdev)
 
        sock->sk->sk_rcvtimeo =
        sock->sk->sk_sndtimeo =  mdev->net_conf->try_connect_int*HZ;
+       drbd_setbufsize(sock, mdev->net_conf->sndbuf_size,
+                       mdev->net_conf->rcvbuf_size);
 
        /* explicitly bind to the configured IP as source IP
        *  for the outgoing connections.
@@ -670,6 +690,8 @@ static struct socket *drbd_wait_for_connect(struct drbd_conf *mdev)
        s_listen->sk->sk_reuse    = 1; /* SO_REUSEADDR */
        s_listen->sk->sk_rcvtimeo = timeo;
        s_listen->sk->sk_sndtimeo = timeo;
+       drbd_setbufsize(s_listen, mdev->net_conf->sndbuf_size,
+                       mdev->net_conf->rcvbuf_size);
 
        what = "bind before listen";
        err = s_listen->ops->bind(s_listen,
@@ -856,16 +878,6 @@ retry:
        sock->sk->sk_priority = TC_PRIO_INTERACTIVE_BULK;
        msock->sk->sk_priority = TC_PRIO_INTERACTIVE;
 
-       if (mdev->net_conf->sndbuf_size) {
-               sock->sk->sk_sndbuf = mdev->net_conf->sndbuf_size;
-               sock->sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
-       }
-
-       if (mdev->net_conf->rcvbuf_size) {
-               sock->sk->sk_rcvbuf = mdev->net_conf->rcvbuf_size;
-               sock->sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
-       }
-
        /* NOT YET ...
         * sock->sk->sk_sndtimeo = mdev->net_conf->timeout*HZ/10;
         * sock->sk->sk_rcvtimeo = MAX_SCHEDULE_TIMEOUT;
@@ -1154,17 +1166,6 @@ int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
        unsigned n_bios = 0;
        unsigned nr_pages = (ds + PAGE_SIZE -1) >> PAGE_SHIFT;
 
-       if (atomic_read(&mdev->new_c_uuid)) {
-               if (atomic_add_unless(&mdev->new_c_uuid, -1, 1)) {
-                       drbd_uuid_new_current(mdev);
-                       drbd_md_sync(mdev);
-
-                       atomic_dec(&mdev->new_c_uuid);
-                       wake_up(&mdev->misc_wait);
-               }
-               wait_event(mdev->misc_wait, !atomic_read(&mdev->new_c_uuid));
-       }
-
        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
         * side than those of the sending peer, we may need to submit the
@@ -1179,7 +1180,7 @@ next_bio:
        bio->bi_sector = sector;
        bio->bi_bdev = mdev->ldev->backing_bdev;
        /* we special case some flags in the multi-bio case, see below
-        * (BIO_RW_UNPLUG, BIO_RW_BARRIER) */
+        * (REQ_UNPLUG, REQ_HARDBARRIER) */
        bio->bi_rw = rw;
        bio->bi_private = e;
        bio->bi_end_io = drbd_endio_sec;
@@ -1208,16 +1209,16 @@ next_bio:
                bios = bios->bi_next;
                bio->bi_next = NULL;
 
-               /* strip off BIO_RW_UNPLUG unless it is the last bio */
+               /* strip off REQ_UNPLUG unless it is the last bio */
                if (bios)
-                       bio->bi_rw &= ~(1<<BIO_RW_UNPLUG);
+                       bio->bi_rw &= ~REQ_UNPLUG;
 
                drbd_generic_make_request(mdev, fault_type, bio);
 
-               /* strip off BIO_RW_BARRIER,
+               /* strip off REQ_HARDBARRIER,
                 * unless it is the first or last bio */
                if (bios && bios->bi_next)
-                       bios->bi_rw &= ~(1<<BIO_RW_BARRIER);
+                       bios->bi_rw &= ~REQ_HARDBARRIER;
        } while (bios);
        maybe_kick_lo(mdev);
        return 0;
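
The loop above distributes the flags across a split request: REQ_UNPLUG
survives only on the last bio, REQ_HARDBARRIER only on the first and the
last. A standalone sketch of that rule (illustrative flag values, not the
kernel's):

    #include <stdio.h>

    #define REQ_UNPLUG      (1u << 0)
    #define REQ_HARDBARRIER (1u << 1)

    int main(void)
    {
            unsigned rw = REQ_UNPLUG | REQ_HARDBARRIER;
            unsigned flags[4];
            int i, n = 4;

            for (i = 0; i < n; i++)
                    flags[i] = rw;
            for (i = 0; i < n - 1; i++)     /* not the last: drop UNPLUG */
                    flags[i] &= ~REQ_UNPLUG;
            for (i = 1; i < n - 1; i++)     /* neither first nor last: drop BARRIER */
                    flags[i] &= ~REQ_HARDBARRIER;

            for (i = 0; i < n; i++)
                    printf("bio %d:%s%s\n", i,
                           flags[i] & REQ_HARDBARRIER ? " BARRIER" : "",
                           flags[i] & REQ_UNPLUG ? " UNPLUG" : "");
            return 0;
    }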
@@ -1232,7 +1233,7 @@ fail:
 }
 
 /**
- * w_e_reissue() - Worker callback; Resubmit a bio, without BIO_RW_BARRIER set
+ * w_e_reissue() - Worker callback; Resubmit a bio, without REQ_HARDBARRIER set
  * @mdev:      DRBD device.
  * @w:         work object.
  * @cancel:    The connection will be closed anyways (unused in this callback)
@@ -1244,7 +1245,7 @@ int w_e_reissue(struct drbd_conf *mdev, struct drbd_work *w, int cancel) __relea
           (and DE_BARRIER_IN_NEXT_EPOCH_ISSUED in the previous Epoch)
           so that we can finish that epoch in drbd_may_finish_epoch().
           That is necessary if we already have a long chain of Epochs, before
-          we realize that BIO_RW_BARRIER is actually not supported */
+          we realize that REQ_HARDBARRIER is actually not supported */
 
        /* As long as the -ENOTSUPP on the barrier is reported immediately
           that will never trigger. If it is reported late, we will just
@@ -1823,14 +1824,14 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
                epoch = list_entry(e->epoch->list.prev, struct drbd_epoch, list);
                if (epoch == e->epoch) {
                        set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                       rw |= (1<<BIO_RW_BARRIER);
+                       rw |= REQ_HARDBARRIER;
                        e->flags |= EE_IS_BARRIER;
                } else {
                        if (atomic_read(&epoch->epoch_size) > 1 ||
                            !test_bit(DE_CONTAINS_A_BARRIER, &epoch->flags)) {
                                set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &epoch->flags);
                                set_bit(DE_CONTAINS_A_BARRIER, &e->epoch->flags);
-                               rw |= (1<<BIO_RW_BARRIER);
+                               rw |= REQ_HARDBARRIER;
                                e->flags |= EE_IS_BARRIER;
                        }
                }
@@ -1840,10 +1841,10 @@ static int receive_Data(struct drbd_conf *mdev, struct p_header *h)
        dp_flags = be32_to_cpu(p->dp_flags);
        if (dp_flags & DP_HARDBARRIER) {
                dev_err(DEV, "ASSERT FAILED would have submitted barrier request\n");
-               /* rw |= (1<<BIO_RW_BARRIER); */
+               /* rw |= REQ_HARDBARRIER; */
        }
        if (dp_flags & DP_RW_SYNC)
-               rw |= (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);
+               rw |= REQ_SYNC | REQ_UNPLUG;
        if (dp_flags & DP_MAY_SET_IN_SYNC)
                e->flags |= EE_MAY_SET_IN_SYNC;
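
The dp_flags decoding above maps DRBD wire-protocol hints onto the unified
block-layer flags. The same mapping, pulled into a helper for clarity (a
hedged sketch; no such helper exists in the patch):

    /* Sketch of the inline dp_flags decoding in receive_Data().
     * DP_* are DRBD protocol flags, REQ_* the unified block flags. */
    static unsigned long wire_flags_to_bio(u32 dp_flags)
    {
            unsigned long rw = WRITE;

            if (dp_flags & DP_RW_SYNC)      /* peer wants synchronous I/O */
                    rw |= REQ_SYNC | REQ_UNPLUG;
            /* DP_HARDBARRIER would add REQ_HARDBARRIER, but barriers are
             * decided locally by the epoch logic, hence the ASSERT above. */
            return rw;
    }

Note that DP_MAY_SET_IN_SYNC is not a bio flag at all; it only sets
EE_MAY_SET_IN_SYNC on the epoch entry.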