 	struct request_queue *q = bdev_get_queue(bdev);
 	int type = flags & BLKDEV_IFL_BARRIER ?
 		DISCARD_BARRIER : DISCARD_NOBARRIER;
+	unsigned int max_discard_sectors;
 	struct bio *bio;
 	int ret = 0;
 	if (!blk_queue_discard(q))
 		return -EOPNOTSUPP;
-	while (nr_sects && !ret) {
-		unsigned int max_discard_sectors =
-			min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	/*
+	 * Ensure that max_discard_sectors is of the proper
+	 * granularity
+	 */
+	max_discard_sectors = min(q->limits.max_discard_sectors, UINT_MAX >> 9);
+	if (q->limits.discard_granularity) {
+		unsigned int disc_sects = q->limits.discard_granularity >> 9;
+
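+		/*
+		 * Round down to a multiple of the discard granularity
+		 * (assumed here to be a power of two).
+		 */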
+		max_discard_sectors &= ~(disc_sects - 1);
+	}
+
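+	/*
+	 * Secure discard: the discarded sectors, including any copies the
+	 * device may hold internally (e.g. from garbage collection), must
+	 * actually be erased rather than merely unmapped.
+	 */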
+	if (flags & BLKDEV_IFL_SECURE) {
+		if (!blk_queue_secdiscard(q))
+			return -EOPNOTSUPP;
+		type |= DISCARD_SECURE;
+	}
+	while (nr_sects && !ret) {
 		bio = bio_alloc(gfp_mask, 1);
 		if (!bio) {
 			ret = -ENOMEM;
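The hunk above clamps max_discard_sectors to a multiple of the device's discard granularity and adds BLKDEV_IFL_SECURE, which is rejected with -EOPNOTSUPP when the queue does not advertise secure discard and otherwise tags the request as DISCARD_SECURE. Roughly, a caller would exercise it as in the following minimal sketch (bdev, start and nr_sects are placeholder variables, and BLKDEV_IFL_WAIT is assumed to be available alongside the other BLKDEV_IFL_* flags):

	int ret;

	/* sketch: discard [start, start + nr_sects), requesting a secure erase */
	ret = blkdev_issue_discard(bdev, start, nr_sects, GFP_KERNEL,
				   BLKDEV_IFL_WAIT | BLKDEV_IFL_SECURE);
	if (ret == -EOPNOTSUPP)
		printk(KERN_WARNING "secure discard not supported\n");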
 int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			sector_t nr_sects, gfp_t gfp_mask, unsigned long flags)
 {
-	int ret = 0;
+	int ret;
 	struct bio *bio;
 	struct bio_batch bb;
 	unsigned int sz, issued = 0;
 			return ret;
 	}
 submit:
+	ret = 0;
 	while (nr_sects != 0) {
 		bio = bio_alloc(gfp_mask,
 				min(nr_sects, (sector_t)BIO_MAX_PAGES));
-		if (!bio)
+		if (!bio) {
+			ret = -ENOMEM;
 			break;
+		}
 		bio->bi_sector = sector;
 		bio->bi_bdev = bdev;
 			if (ret < (sz << 9))
 				break;
 		}
+		ret = 0;
 		issued++;
 		submit_bio(WRITE, bio);
 	}
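This hunk changes blkdev_issue_zeroout() so that a failed bio_alloc() is reported as -ENOMEM instead of leaving a stale ret (which could make an incompletely zeroed range look like success), clears ret once the bio has been filled so the byte count left behind by the ret < (sz << 9) fill loop is not returned as an error, and re-initialises ret at the submit: label so each submission pass starts from a clean state. From the caller's side the convention is unchanged, as in this minimal sketch (bdev, start and nr_sects are placeholders):

	int ret;

	/* sketch: write zeroes over [start, start + nr_sects) and wait for completion */
	ret = blkdev_issue_zeroout(bdev, start, nr_sects, GFP_KERNEL,
				   BLKDEV_IFL_WAIT);
	if (ret == -ENOMEM)
		printk(KERN_WARNING "zeroout: bio allocation failed\n");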