sg: fix a warning in blk_rq_aligned() call
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 5e7dc9973458406c5159b8ea3b87033302bf5fcf..6a725461654d86302c508faba05012f827d65780 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
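This blobdiff folds together several upstream changes to block/blk-merge.c: an unused phys_size variable is dropped from __blk_recalc_rq_segments(), the REQ_RW flag test becomes REQ_WRITE, the blk_pc_request() wrapper is open-coded as a cmd_type comparison, and the coarse integrity check in attempt_merge() is replaced by blk_integrity_merge_bio()/blk_integrity_merge_rq() checks at the points where segments are actually merged.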
@@ -12,7 +12,6 @@
 static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
                                             struct bio *bio)
 {
-       unsigned int phys_size;
        struct bio_vec *bv, *bvprv = NULL;
        int cluster, i, high, highprv = 1;
        unsigned int seg_size, nr_phys_segs;
@@ -24,7 +23,7 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
        fbio = bio;
        cluster = test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags);
        seg_size = 0;
-       phys_size = nr_phys_segs = 0;
+       nr_phys_segs = 0;
        for_each_bio(bio) {
                bio_for_each_segment(bv, bio, i) {
                        /*
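
The first two hunks drop phys_size: the deleted chained assignment was the variable's only use, so it was dead weight (the kind of dead store newer compilers flag with -Wunused-but-set-variable). A minimal illustration of the pattern, with hypothetical names:

	static unsigned int count_segments(void)
	{
		unsigned int phys_size;			/* written once, never read */
		unsigned int nr_phys_segs;

		phys_size = nr_phys_segs = 0;		/* the chained init hides the dead store */
		nr_phys_segs++;				/* only nr_phys_segs carries a result */
		return nr_phys_segs;
	}
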
@@ -180,7 +179,7 @@ new_segment:
        }
 
        if (q->dma_drain_size && q->dma_drain_needed(rq)) {
-               if (rq->cmd_flags & REQ_RW)
+               if (rq->cmd_flags & REQ_WRITE)
                        memset(q->dma_drain_buffer, 0, q->dma_drain_size);
 
                sg->page_link &= ~0x02;
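
REQ_RW was the old spelling of the data-direction bit in rq->cmd_flags; after the bio/request flag unification the same bit is named REQ_WRITE, so this hunk renames without changing logic. The branch it sits in zeroes the driver-registered drain buffer on writes so the device never sees stale bytes beyond the real payload. A hedged sketch of how a driver reaches this path (the callback and buffer names are illustrative; blk_queue_dma_drain() is the actual registration helper):

	/* Illustrative: request a drain element for packet commands,
	 * whose transfer lengths the device may over-read. */
	static int my_drain_needed(struct request *rq)
	{
		return rq->cmd_type == REQ_TYPE_BLOCK_PC;
	}

	/* drain_buf/drain_size: a driver-owned buffer the queue appends
	 * as the final scatterlist entry when my_drain_needed() says so. */
	blk_queue_dma_drain(q, my_drain_needed, drain_buf, drain_size);
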
@@ -206,12 +205,11 @@ static inline int ll_new_hw_segment(struct request_queue *q,
 {
        int nr_phys_segs = bio_phys_segments(q, bio);
 
-       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q)) {
-               req->cmd_flags |= REQ_NOMERGE;
-               if (req == q->last_merge)
-                       q->last_merge = NULL;
-               return 0;
-       }
+       if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
+               goto no_merge;
+
+       if (bio_integrity(bio) && blk_integrity_merge_bio(q, req, bio))
+               goto no_merge;
 
        /*
         * This will form the start of a new hw segment.  Bump both
@@ -219,6 +217,12 @@ static inline int ll_new_hw_segment(struct request_queue *q,
         */
        req->nr_phys_segments += nr_phys_segs;
        return 1;
+
+no_merge:
+       req->cmd_flags |= REQ_NOMERGE;
+       if (req == q->last_merge)
+               q->last_merge = NULL;
+       return 0;
 }
 
 int ll_back_merge_fn(struct request_queue *q, struct request *req,
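
ll_new_hw_segment() now funnels both reject conditions through a single no_merge exit, so the REQ_NOMERGE flagging and the q->last_merge cache invalidation are written exactly once. The added test also documents the helper's convention at this point in the tree: a non-zero return from blk_integrity_merge_bio() means the bio's integrity payload cannot be folded into the request. A self-contained sketch of the shared-exit idiom (hypothetical names, plain C):

	struct req { unsigned int flags; };
	#define REQ_NOMERGE 0x1u

	/* Every reject path shares one label, so the bookkeeping cannot
	 * drift apart as further checks are added. */
	static int try_merge(struct req *r, int too_many_segs, int integrity_clash)
	{
		if (too_many_segs)
			goto no_merge;
		if (integrity_clash)
			goto no_merge;
		return 1;			/* merge allowed */
	no_merge:
		r->flags |= REQ_NOMERGE;	/* stop offering r for merges */
		return 0;
	}
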
@@ -226,7 +230,7 @@ int ll_back_merge_fn(struct request_queue *q, struct request *req,
 {
        unsigned short max_sectors;
 
-       if (unlikely(blk_pc_request(req)))
+       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
                max_sectors = queue_max_hw_sectors(q);
        else
                max_sectors = queue_max_sectors(q);
@@ -250,7 +254,7 @@ int ll_front_merge_fn(struct request_queue *q, struct request *req,
 {
        unsigned short max_sectors;
 
-       if (unlikely(blk_pc_request(req)))
+       if (unlikely(req->cmd_type == REQ_TYPE_BLOCK_PC))
                max_sectors = queue_max_hw_sectors(q);
        else
                max_sectors = queue_max_sectors(q);
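
ll_back_merge_fn() and ll_front_merge_fn() receive the same mechanical substitution: blk_pc_request() is open-coded as the cmd_type comparison it expanded to, as the wrapper was being removed tree-wide. To the best of my knowledge the removed wrapper was essentially the line below, which makes the change behaviour-preserving; BLOCK_PC (passthrough) requests get the hardware ceiling, while file-system requests honour the possibly lower soft limit:

	/* Approximate form of the removed wrapper: */
	#define blk_pc_request(rq)	((rq)->cmd_type == REQ_TYPE_BLOCK_PC)
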
@@ -302,6 +306,9 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
        if (total_phys_segments > queue_max_segments(q))
                return 0;
 
+       if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
+               return 0;
+
        /* Merge is OK... */
        req->nr_phys_segments = total_phys_segments;
        return 1;
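
Request-to-request merging gains the matching gate: when req carries integrity metadata (blk_integrity_rq() returns non-zero) and blk_integrity_merge_rq() reports the two requests' integrity payloads as unmergeable, the merge is refused before the new segment count is committed. An annotated restatement of the decision sequence:

	if (total_phys_segments > queue_max_segments(q))
		return 0;	/* data segments alone exceed the queue limit */

	if (blk_integrity_rq(req) && blk_integrity_merge_rq(q, req, next))
		return 0;	/* integrity payloads present but unmergeable */

	/* Merge is OK: commit the combined segment count. */
	req->nr_phys_segments = total_phys_segments;
	return 1;
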
@@ -373,9 +380,6 @@ static int attempt_merge(struct request_queue *q, struct request *req,
            || next->special)
                return 0;
 
-       if (blk_integrity_rq(req) != blk_integrity_rq(next))
-               return 0;
-
        /*
         * If we are allowed to merge, then append bio list
         * from next to rq and release next. merge_requests_fn
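
The final hunk deletes the older, coarser rule that two requests could merge only if they agreed on carrying integrity metadata at all. It is superseded by the blk_integrity_merge_rq() check added above, which attempt_merge() reaches through the merge_requests_fn named in the trailing comment, so integrity validation becomes a per-merge decision made alongside the existing segment-limit checks.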