block: fix an address space warning in blk-map.c
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 89c855c5655c8f2204e0dd0d263571d267a1c6ce..780824edac16edbc8d8f0bc5e2959e801000464f 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -124,6 +124,9 @@ struct request {
         * physical address coalescing is performed.
         */
        unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+       unsigned short nr_integrity_segments;
+#endif
 
        unsigned short ioprio;
 
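The new nr_integrity_segments field gives the merge path a per-request tally to weigh against the queue-wide limit introduced in the next hunk. A hedged sketch of the bookkeeping idea (nr_new_segs and the placement are illustrative, not the exact blk-merge.c code):

	/* Illustrative merge check: refuse to merge a bio into a request
	 * when the combined integrity segment count would exceed the
	 * queue limit; nr_new_segs is a hypothetical count for the
	 * incoming bio. */
	if (blk_integrity_rq(rq) &&
	    rq->nr_integrity_segments + nr_new_segs >
	    queue_max_integrity_segments(q))
		return 0;	/* reject the merge */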
@@ -243,6 +246,7 @@ struct queue_limits {
 
        unsigned short          logical_block_size;
        unsigned short          max_segments;
+       unsigned short          max_integrity_segments;
 
        unsigned char           misaligned;
        unsigned char           discard_misaligned;
@@ -389,6 +393,7 @@ struct request_queue
 #define QUEUE_FLAG_DISCARD     16      /* supports DISCARD */
 #define QUEUE_FLAG_NOXMERGES   17      /* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  18      /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD  19      /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_CLUSTER) |            \
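A driver that can destructively erase discarded blocks raises the new flag at queue setup. A minimal sketch, assuming a probe-time context where the queue is not yet live (hw_supports_secure_erase is a hypothetical capability bit):

	/* During probe, after allocating the queue: advertise discard,
	 * and secure discard only if the hardware can really destroy
	 * the data (e.g. eMMC secure erase). */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	if (hw_supports_secure_erase)
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);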
@@ -524,6 +529,8 @@ enum {
 #define blk_queue_stackable(q) \
        test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)   test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_secdiscard(q)        (blk_queue_discard(q) && \
+       test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
 
 #define blk_noretry_request(rq) \
        ((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
@@ -918,10 +925,12 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
 }
 enum{
        BLKDEV_WAIT,    /* wait for completion */
-       BLKDEV_BARRIER, /*issue request with barrier */
+       BLKDEV_BARRIER, /* issue request with barrier */
+       BLKDEV_SECURE,  /* secure discard */
 };
 #define BLKDEV_IFL_WAIT                (1 << BLKDEV_WAIT)
 #define BLKDEV_IFL_BARRIER     (1 << BLKDEV_BARRIER)
+#define BLKDEV_IFL_SECURE      (1 << BLKDEV_SECURE)
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
                        unsigned long);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
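Callers ask for the secure variant by OR-ing the new flag into blkdev_issue_discard()'s flag argument. A minimal sketch, assuming the five-argument signature above and a caller that wants synchronous completion (erase_range() itself is hypothetical):

static int erase_range(struct block_device *bdev, sector_t sector,
		       sector_t nr_sects)
{
	struct request_queue *q = bdev_get_queue(bdev);

	/* Only issue a secure discard where the queue advertises it. */
	if (!blk_queue_secdiscard(q))
		return -EOPNOTSUPP;

	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_SECURE);
}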
@@ -1088,11 +1097,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
        return q ? q->dma_alignment : 511;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                 unsigned int len)
 {
        unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-       return !((unsigned long)addr & alignment) && !(len & alignment);
+       return !(addr & alignment) && !(len & alignment);
 }
 
 /* assumes size > 256 */
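This prototype change is the address-space fix named in the commit title: blk-map.c passes user-space buffer addresses, and a plain void * parameter forced an implicit __user-to-kernel pointer conversion that sparse flags. Taking an unsigned long makes the cast explicit at the call site; roughly what a blk_rq_map_user()-style caller now does (can_map_directly() is an illustrative wrapper, not code from the patch):

static bool can_map_directly(struct request_queue *q,
			     void __user *ubuf, unsigned int len)
{
	/* An explicit cast to unsigned long is sparse-clean, unlike
	 * silently dropping the __user address-space annotation. */
	return blk_rq_aligned(q, (unsigned long)ubuf, len);
}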
@@ -1208,8 +1217,13 @@ struct blk_integrity {
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+                                  struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+                                 struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+                                  struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
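With the queue and bio passed explicitly, stacking drivers can apply their own queue's limits when mapping protection information, and the new merge helpers let the block core veto merges that would overflow them. A hedged sketch of driver-side usage (map_prot() and its scatterlist-sizing policy are assumptions, not code from this patch):

static int map_prot(struct request_queue *q, struct request *rq,
		    struct scatterlist *sgl)
{
	/* Count the integrity segments first so the driver can check
	 * that the scatterlist it allocated is large enough. */
	int count = blk_rq_count_integrity_sg(q, rq->bio);

	if (count > queue_max_integrity_segments(q))
		return -EIO;

	return blk_rq_map_integrity_sg(q, rq->bio, sgl);
}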
@@ -1230,16 +1244,32 @@ static inline int blk_integrity_rq(struct request *rq)
        return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+                                                   unsigned int segs)
+{
+       q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+       return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq)                   (0)
-#define blk_rq_count_integrity_sg(a)           (0)
-#define blk_rq_map_integrity_sg(a, b)          (0)
+#define blk_rq_count_integrity_sg(a, b)                (0)
+#define blk_rq_map_integrity_sg(a, b, c)       (0)
 #define bdev_get_integrity(a)                  (0)
 #define blk_get_integrity(a)                   (0)
 #define blk_integrity_compare(a, b)            (0)
 #define blk_integrity_register(a, b)           (0)
 #define blk_integrity_unregister(a)            do { } while (0);
+#define blk_queue_max_integrity_segments(a, b) do { } while (0);
+#define queue_max_integrity_segments(a)                (0)
+#define blk_integrity_merge_rq(a, b, c)                (0)
+#define blk_integrity_merge_bio(a, b, c)       (0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
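With these helpers in place, a controller driver caps integrity segments at queue setup the same way it caps data segments. A minimal sketch, assuming hypothetical hardware limits HW_MAX_SEGS and HW_MAX_PROT_SEGS:

	/* At queue setup: bound both the data and the protection
	 * scatterlist to what the HBA can address per command. */
	blk_queue_max_segments(q, HW_MAX_SEGS);
	blk_queue_max_integrity_segments(q, HW_MAX_PROT_SEGS);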