Merge branch 'v2.6.36-rc8' into for-2.6.37/barrier
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index ea79072f521012549c4c2673a6210340834202a4..1846a0dd7035e818bc2c88d52423b1a2cf151f8e 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -440,12 +440,7 @@ _xfs_buf_find(
                ASSERT(btp == bp->b_target);
                if (bp->b_file_offset == range_base &&
                    bp->b_buffer_length == range_length) {
-                       /*
-                        * If we look at something, bring it to the
-                        * front of the list for next time.
-                        */
                        atomic_inc(&bp->b_hold);
-                       list_move(&bp->b_hash_list, &hash->bh_list);
                        goto found;
                }
        }
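The five '-' lines drop a move-to-front heuristic: every cache hit used to splice the buffer to the head of its hash chain, dirtying the bucket's cache line on a pure lookup. After this hunk a hit only takes a hold reference. For context, a sketch of the surviving loop with the bucket walk and locking filled in from the pre-rbtree xfs_buf.c of this era (reconstructed, not verbatim from the patch):

	spin_lock(&hash->bh_lock);
	list_for_each_entry(bp, &hash->bh_list, b_hash_list) {
		ASSERT(btp == bp->b_target);
		if (bp->b_file_offset == range_base &&
		    bp->b_buffer_length == range_length) {
			/* Hit: take a reference, but leave the
			 * chain order alone. */
			atomic_inc(&bp->b_hold);
			goto found;
		}
	}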
@@ -929,19 +924,7 @@ xfs_buf_iodone_work(
        xfs_buf_t               *bp =
                container_of(work, xfs_buf_t, b_iodone_work);
 
-       /*
-        * We can get an EOPNOTSUPP to ordered writes.  Here we clear the
-        * ordered flag and reissue them.  Because we can't tell the higher
-        * layers directly that they should not issue ordered I/O anymore, they
-        * need to check if the _XFS_BARRIER_FAILED flag was set during I/O completion.
-        */
-       if ((bp->b_error == EOPNOTSUPP) &&
-           (bp->b_flags & (XBF_ORDERED|XBF_ASYNC)) == (XBF_ORDERED|XBF_ASYNC)) {
-               trace_xfs_buf_ordered_retry(bp, _RET_IP_);
-               bp->b_flags &= ~XBF_ORDERED;
-               bp->b_flags |= _XFS_BARRIER_FAILED;
-               xfs_buf_iorequest(bp);
-       } else if (bp->b_iodone)
+       if (bp->b_iodone)
                (*(bp->b_iodone))(bp);
        else if (bp->b_flags & XBF_ASYNC)
                xfs_buf_relse(bp);
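Background for this hunk: the old REQ_HARDBARRIER interface let a device reject an ordered write with -EOPNOTSUPP after the filesystem had already issued it, so XFS had to strip XBF_ORDERED, reissue the I/O, and raise _XFS_BARRIER_FAILED for the log layer to notice. The 2.6.37 FLUSH/FUA interface never fails that way: the block layer simply drops REQ_FLUSH/REQ_FUA for devices without a volatile write cache. The handler therefore collapses to the following (assembled from the context and '+' lines above; the signature is reconstructed from the hunk header):

	STATIC void
	xfs_buf_iodone_work(
		struct work_struct	*work)
	{
		xfs_buf_t		*bp =
			container_of(work, xfs_buf_t, b_iodone_work);

		if (bp->b_iodone)
			(*(bp->b_iodone))(bp);
		else if (bp->b_flags & XBF_ASYNC)
			xfs_buf_relse(bp);
	}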
@@ -1200,7 +1183,7 @@ _xfs_buf_ioapply(
 
        if (bp->b_flags & XBF_ORDERED) {
                ASSERT(!(bp->b_flags & XBF_READ));
-               rw = WRITE_BARRIER;
+               rw = WRITE_FLUSH_FUA;
        } else if (bp->b_flags & XBF_LOG_BUFFER) {
                ASSERT(!(bp->b_flags & XBF_READ_AHEAD));
                bp->b_flags &= ~_XBF_RUN_QUEUES;
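This one-liner is the payload of the series: ordered log buffers are now issued as a preflush + FUA write instead of a hard barrier. In the 2.6.37-era include/linux/fs.h the two request types expand roughly as below (exact flags such as REQ_UNPLUG moved around during this cycle, so treat the expansion as a sketch):

	/* Approximate 2.6.37 definitions (include/linux/fs.h): */
	#define WRITE_BARRIER	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
				 REQ_HARDBARRIER)	/* old: drains the queue */
	#define WRITE_FLUSH_FUA	(WRITE | REQ_SYNC | REQ_NOIDLE | REQ_UNPLUG | \
				 REQ_FLUSH | REQ_FUA)	/* new: preflush + FUA */

REQ_FLUSH forces the device cache to media before the write and REQ_FUA makes the write itself durable on completion, giving the same integrity guarantee without stalling unrelated requests the way a barrier did.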
@@ -1443,8 +1426,7 @@ xfs_alloc_bufhash(
 {
        unsigned int            i;
 
-       btp->bt_hashshift = external ? 3 : 8;   /* 8 or 256 buckets */
-       btp->bt_hashmask = (1 << btp->bt_hashshift) - 1;
+       btp->bt_hashshift = external ? 3 : 12;  /* 8 or 4096 buckets */
        btp->bt_hash = kmem_zalloc_large((1 << btp->bt_hashshift) *
                                         sizeof(xfs_bufhash_t));
        for (i = 0; i < (1 << btp->bt_hashshift); i++) {
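Besides growing the default table from 256 to 4096 buckets (2^8 to 2^12; the external case stays at 8), this hunk deletes bt_hashmask, which is redundant: the bucket index comes from hash_long(), and hash_long(val, bits) already returns a value in [0, 2^bits). A sketch of the lookup-side indexing, assuming the era's hash_long()-based scheme (not verbatim from the patch):

	xfs_bufhash_t	*hash;

	/* hash_long() folds the offset down to bt_hashshift bits,
	 * so no separate mask is required. */
	hash = &btp->bt_hash[hash_long((unsigned long)ioff,
				       btp->bt_hashshift)];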
@@ -1938,7 +1920,8 @@ xfs_buf_init(void)
        if (!xfs_buf_zone)
                goto out;
 
-       xfslogd_workqueue = create_workqueue("xfslogd");
+       xfslogd_workqueue = alloc_workqueue("xfslogd",
+                                       WQ_RESCUER | WQ_HIGHPRI, 1);
        if (!xfslogd_workqueue)
                goto out_free_buf_zone;
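create_workqueue() spawned one default-priority kernel thread per CPU. The replacement asks the concurrency-managed workqueue API for exactly what the log daemon needs: WQ_RESCUER guarantees a rescuer thread so log I/O completion can make forward progress under memory pressure (the flag was later renamed WQ_MEM_RECLAIM), WQ_HIGHPRI runs its items ahead of normal work, and max_active = 1 keeps one work item in flight per CPU. A minimal usage sketch of this API; "example_wq" and the error handling are illustrative, not from this patch:

	#include <linux/errno.h>
	#include <linux/workqueue.h>

	static struct workqueue_struct *example_wq;

	static int example_setup(void)
	{
		/* Reclaim-safe, high-priority queue, one item in
		 * flight per CPU. */
		example_wq = alloc_workqueue("xfslogd",
					     WQ_RESCUER | WQ_HIGHPRI, 1);
		if (!example_wq)
			return -ENOMEM;
		return 0;
	}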