//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/smp_lock.h>
#include <linux/hdreg.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>

#define PART_BITS 4

static int major, index;

struct virtio_blk
{
	spinlock_t lock;

	struct virtio_device *vdev;
	struct virtqueue *vq;

	/* The disk structure for the kernel. */
	struct gendisk *disk;

	/* Request tracking. */
	struct list_head reqs;

	mempool_t *pool;

	/* What the host tells us, plus 2 for the request header & trailer. */
	unsigned int sg_elems;

	/* Scatterlist: can be too big for stack. */
	struct scatterlist sg[/*sg_elems*/];
};

struct virtblk_req
{
	struct list_head list;
	struct request *req;
	struct virtio_blk_outhdr out_hdr;
	struct virtio_scsi_inhdr in_hdr;
	u8 status;
};

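/*
 * Completion callback for the request virtqueue: reap finished requests,
 * translate the device status byte into an errno, complete them with the
 * block layer, and restart the queue in case it was stopped while waiting
 * for free buffers.
 */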
static void blk_done(struct virtqueue *vq)
{
	struct virtio_blk *vblk = vq->vdev->priv;
	struct virtblk_req *vbr;
	unsigned int len;
	unsigned long flags;

	spin_lock_irqsave(&vblk->lock, flags);
	while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != NULL) {
		int error;

		switch (vbr->status) {
		case VIRTIO_BLK_S_OK:
			error = 0;
			break;
		case VIRTIO_BLK_S_UNSUPP:
			error = -ENOTTY;
			break;
		default:
			error = -EIO;
			break;
		}

		switch (vbr->req->cmd_type) {
		case REQ_TYPE_BLOCK_PC:
			vbr->req->resid_len = vbr->in_hdr.residual;
			vbr->req->sense_len = vbr->in_hdr.sense_len;
			vbr->req->errors = vbr->in_hdr.errors;
			break;
		case REQ_TYPE_SPECIAL:
			vbr->req->errors = (error != 0);
			break;
		default:
			break;
		}

		__blk_end_request_all(vbr->req, error);
		list_del(&vbr->list);
		mempool_free(vbr, vblk->pool);
	}
	/* In case queue is stopped waiting for more buffers. */
	blk_start_queue(vblk->disk->queue);
	spin_unlock_irqrestore(&vblk->lock, flags);
}

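/*
 * Build the descriptor chain for one request (out header, optional SCSI
 * command and sense buffers, data segments, status byte) and add it to the
 * virtqueue.  Returns false if no virtblk_req can be allocated or the
 * virtqueue is full, so the caller can stop the queue and retry later.
 */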
static bool do_req(struct request_queue *q, struct virtio_blk *vblk,
		   struct request *req)
{
	unsigned long num, out = 0, in = 0;
	struct virtblk_req *vbr;

	vbr = mempool_alloc(vblk->pool, GFP_ATOMIC);
	if (!vbr)
		/* When another request finishes we'll try again. */
		return false;

	vbr->req = req;

	if (req->cmd_flags & REQ_FLUSH) {
		vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH;
		vbr->out_hdr.sector = 0;
		vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
	} else {
		switch (req->cmd_type) {
		case REQ_TYPE_FS:
			vbr->out_hdr.type = 0;
			vbr->out_hdr.sector = blk_rq_pos(vbr->req);
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_BLOCK_PC:
			vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		case REQ_TYPE_SPECIAL:
			vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID;
			vbr->out_hdr.sector = 0;
			vbr->out_hdr.ioprio = req_get_ioprio(vbr->req);
			break;
		default:
			/* We don't put anything else in the queue. */
			BUG();
		}
	}

	if (vbr->req->cmd_flags & REQ_HARDBARRIER)
		vbr->out_hdr.type |= VIRTIO_BLK_T_BARRIER;

	sg_set_buf(&vblk->sg[out++], &vbr->out_hdr, sizeof(vbr->out_hdr));

	/*
	 * If this is a packet command we need a couple of additional headers.
	 * Behind the normal outhdr we put a segment with the scsi command
	 * block, and before the normal inhdr we put the sense data and the
	 * inhdr with additional status information.
	 */
	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC)
		sg_set_buf(&vblk->sg[out++], vbr->req->cmd, vbr->req->cmd_len);

	num = blk_rq_map_sg(q, vbr->req, vblk->sg + out);

	if (vbr->req->cmd_type == REQ_TYPE_BLOCK_PC) {
		sg_set_buf(&vblk->sg[num + out + in++], vbr->req->sense, 96);
		sg_set_buf(&vblk->sg[num + out + in++], &vbr->in_hdr,
			   sizeof(vbr->in_hdr));
	}

	sg_set_buf(&vblk->sg[num + out + in++], &vbr->status,
		   sizeof(vbr->status));

	if (num) {
		if (rq_data_dir(vbr->req) == WRITE) {
			vbr->out_hdr.type |= VIRTIO_BLK_T_OUT;
			out += num;
		} else {
			vbr->out_hdr.type |= VIRTIO_BLK_T_IN;
			in += num;
		}
	}

	if (virtqueue_add_buf(vblk->vq, vblk->sg, out, in, vbr) < 0) {
		mempool_free(vbr, vblk->pool);
		return false;
	}

	list_add_tail(&vbr->list, &vblk->reqs);
	return true;
}

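/*
 * Request-queue strategy function: hand as many pending requests as possible
 * to do_req() and kick the virtqueue once at the end.  If the virtqueue
 * fills up, the block queue is stopped until blk_done() restarts it.
 */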
static void do_virtblk_request(struct request_queue *q)
{
	struct virtio_blk *vblk = q->queuedata;
	struct request *req;
	unsigned int issued = 0;

	while ((req = blk_peek_request(q)) != NULL) {
		BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

		/* If this request fails, stop queue and wait for something to
		   finish to restart it. */
		if (!do_req(q, vblk, req)) {
			blk_stop_queue(q);
			break;
		}
		blk_start_request(req);
		issued++;
	}

	if (issued)
		virtqueue_kick(vblk->vq);
}

/* Return the device ID (serial number) string for *disk in *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
	struct virtio_blk *vblk = disk->private_data;
	struct request *req;
	struct bio *bio;

	bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES,
			   GFP_KERNEL);
	if (IS_ERR(bio))
		return PTR_ERR(bio);

	req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL);
	if (IS_ERR(req)) {
		bio_put(bio);
		return PTR_ERR(req);
	}

	req->cmd_type = REQ_TYPE_SPECIAL;
	return blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
}

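/*
 * Allow the generic SCSI ioctls (SG_IO and friends) only if the host
 * advertised VIRTIO_BLK_F_SCSI.  Called under the BKL from virtblk_ioctl()
 * below.
 */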
static int virtblk_locked_ioctl(struct block_device *bdev, fmode_t mode,
			     unsigned cmd, unsigned long data)
{
	struct gendisk *disk = bdev->bd_disk;
	struct virtio_blk *vblk = disk->private_data;

	/*
	 * Only allow the generic SCSI ioctls if the host can support it.
	 */
	if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
		return -ENOTTY;

	return scsi_cmd_ioctl(disk->queue, disk, mode, cmd,
			      (void __user *)data);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
			 unsigned int cmd, unsigned long param)
{
	int ret;

	lock_kernel();
	ret = virtblk_locked_ioctl(bdev, mode, cmd, param);
	unlock_kernel();

	return ret;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
	struct virtio_blk *vblk = bd->bd_disk->private_data;
	struct virtio_blk_geometry vgeo;
	int err;

	/* see if the host passed in geometry config */
	err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY,
				offsetof(struct virtio_blk_config, geometry),
				&vgeo);

	if (!err) {
		geo->heads = vgeo.heads;
		geo->sectors = vgeo.sectors;
		geo->cylinders = vgeo.cylinders;
	} else {
		/* some standard values, similar to sd */
		geo->heads = 1 << 6;
		geo->sectors = 1 << 5;
		geo->cylinders = get_capacity(bd->bd_disk) >> 11;
	}
	return 0;
}

static const struct block_device_operations virtblk_fops = {
	.ioctl  = virtblk_ioctl,
	.owner  = THIS_MODULE,
	.getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
	return index << PART_BITS;
}

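/*
 * sysfs "serial" attribute: expose the ID string reported by the host.
 * An unsupported or failing GET_ID request simply yields an empty attribute.
 */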
static ssize_t virtblk_serial_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct gendisk *disk = dev_to_disk(dev);
	int err;

	/* sysfs gives us a PAGE_SIZE buffer */
	BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

	buf[VIRTIO_BLK_ID_BYTES] = '\0';
	err = virtblk_get_id(disk, buf);
	if (!err)
		return strlen(buf);

	if (err == -EIO) /* Unsupported? Make it empty. */
		return 0;

	return err;
}
DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL);

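/*
 * Probe a new virtio block device: read the segment limit from the config
 * space, set up the virtqueue, mempool and request queue, pick a vdX name,
 * apply the advertised features (flush/barrier ordering, read-only, block
 * size, topology) and register the gendisk plus its "serial" attribute.
 */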
static int __devinit virtblk_probe(struct virtio_device *vdev)
{
	struct virtio_blk *vblk;
	struct request_queue *q;
	int err;
	u64 cap;
	u32 v, blk_size, sg_elems, opt_io_size;
	u16 min_io_size;
	u8 physical_block_exp, alignment_offset;

	if (index_to_minor(index) >= 1 << MINORBITS)
		return -ENOSPC;

	/* We need to know how many segments before we allocate. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX,
				offsetof(struct virtio_blk_config, seg_max),
				&sg_elems);

	/* We need at least one SG element, whatever they say. */
	if (err || !sg_elems)
		sg_elems = 1;

	/* We need extra sg elements at head and tail. */
	sg_elems += 2;
	vdev->priv = vblk = kmalloc(sizeof(*vblk) +
				    sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL);
	if (!vblk) {
		err = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&vblk->reqs);
	spin_lock_init(&vblk->lock);
	vblk->vdev = vdev;
	vblk->sg_elems = sg_elems;
	sg_init_table(vblk->sg, vblk->sg_elems);

	/* We expect one virtqueue, for output. */
	vblk->vq = virtio_find_single_vq(vdev, blk_done, "requests");
	if (IS_ERR(vblk->vq)) {
		err = PTR_ERR(vblk->vq);
		goto out_free_vblk;
	}

	vblk->pool = mempool_create_kmalloc_pool(1, sizeof(struct virtblk_req));
	if (!vblk->pool) {
		err = -ENOMEM;
		goto out_free_vq;
	}

	/* FIXME: How many partitions?  How long is a piece of string? */
	vblk->disk = alloc_disk(1 << PART_BITS);
	if (!vblk->disk) {
		err = -ENOMEM;
		goto out_mempool;
	}

	q = vblk->disk->queue = blk_init_queue(do_virtblk_request, &vblk->lock);
	if (!q) {
		err = -ENOMEM;
		goto out_put_disk;
	}

	q->queuedata = vblk;

	if (index < 26) {
		sprintf(vblk->disk->disk_name, "vd%c", 'a' + index % 26);
	} else if (index < (26 + 1) * 26) {
		sprintf(vblk->disk->disk_name, "vd%c%c",
			'a' + index / 26 - 1, 'a' + index % 26);
	} else {
		const unsigned int m1 = (index / 26 - 1) / 26 - 1;
		const unsigned int m2 = (index / 26 - 1) % 26;
		const unsigned int m3 = index % 26;
		sprintf(vblk->disk->disk_name, "vd%c%c%c",
			'a' + m1, 'a' + m2, 'a' + m3);
	}

	vblk->disk->major = major;
	vblk->disk->first_minor = index_to_minor(index);
	vblk->disk->private_data = vblk;
	vblk->disk->fops = &virtblk_fops;
	vblk->disk->driverfs_dev = &vdev->dev;
	index++;

	if (virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH)) {
		/*
		 * If the FLUSH feature is supported we do have support for
		 * flushing a volatile write cache on the host.  Use that
		 * to implement write barrier support.
		 */
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN_FLUSH);
	} else if (virtio_has_feature(vdev, VIRTIO_BLK_F_BARRIER)) {
		/*
		 * If the BARRIER feature is supported the host expects us
		 * to order requests by tags.  This implies there is no
		 * volatile write cache on the host, and that the host
		 * never re-orders outstanding I/O.  This feature is not
		 * useful for real-life scenarios and is deprecated.
		 */
		blk_queue_ordered(q, QUEUE_ORDERED_TAG);
	} else {
		/*
		 * If the FLUSH feature is not supported we must assume that
		 * the host does not perform any kind of volatile write
		 * caching.  We still need to drain the queue to provide
		 * proper barrier semantics.
		 */
		blk_queue_ordered(q, QUEUE_ORDERED_DRAIN);
	}

	/* If disk is read-only in the host, the guest should obey */
	if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
		set_disk_ro(vblk->disk, 1);

	/* Host must always specify the capacity. */
	vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity),
			  &cap, sizeof(cap));

	/* If capacity is too big, truncate with warning. */
	if ((sector_t)cap != cap) {
		dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
			 (unsigned long long)cap);
		cap = (sector_t)-1;
	}
	set_capacity(vblk->disk, cap);

	/* We can handle whatever the host told us to handle. */
	blk_queue_max_segments(q, vblk->sg_elems - 2);

	/* No need to bounce any requests */
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);

	/* No real sector limit. */
	blk_queue_max_hw_sectors(q, -1U);

	/* Host can optionally specify maximum segment size and number of
	 * segments. */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX,
				offsetof(struct virtio_blk_config, size_max),
				&v);
	if (!err)
		blk_queue_max_segment_size(q, v);
	else
		blk_queue_max_segment_size(q, -1U);

	/* Host can optionally specify the block size of the device */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE,
				offsetof(struct virtio_blk_config, blk_size),
				&blk_size);
	if (!err)
		blk_queue_logical_block_size(q, blk_size);
	else
		blk_size = queue_logical_block_size(q);

	/* Use topology information if available */
	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, physical_block_exp),
			&physical_block_exp);
	if (!err && physical_block_exp)
		blk_queue_physical_block_size(q,
				blk_size * (1 << physical_block_exp));

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, alignment_offset),
			&alignment_offset);
	if (!err && alignment_offset)
		blk_queue_alignment_offset(q, blk_size * alignment_offset);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, min_io_size),
			&min_io_size);
	if (!err && min_io_size)
		blk_queue_io_min(q, blk_size * min_io_size);

	err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY,
			offsetof(struct virtio_blk_config, opt_io_size),
			&opt_io_size);
	if (!err && opt_io_size)
		blk_queue_io_opt(q, blk_size * opt_io_size);

	add_disk(vblk->disk);
	err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial);
	if (err)
		goto out_del_disk;

	return 0;

out_del_disk:
	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
out_put_disk:
	put_disk(vblk->disk);
out_mempool:
	mempool_destroy(vblk->pool);
out_free_vq:
	vdev->config->del_vqs(vdev);
out_free_vblk:
	kfree(vblk);
out:
	return err;
}

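/*
 * Tear down a device: reset it so no further completions arrive, then
 * release the gendisk, request queue, mempool and virtqueues.
 */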
static void __devexit virtblk_remove(struct virtio_device *vdev)
{
	struct virtio_blk *vblk = vdev->priv;

	/* Nothing should be pending. */
	BUG_ON(!list_empty(&vblk->reqs));

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	del_gendisk(vblk->disk);
	blk_cleanup_queue(vblk->disk->queue);
	put_disk(vblk->disk);
	mempool_destroy(vblk->pool);
	vdev->config->del_vqs(vdev);
	kfree(vblk);
}

static const struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_BLK_F_BARRIER, VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX,
	VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
	VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY
};

/*
 * virtio_blk causes spurious section mismatch warning by
 * simultaneously referring to a __devinit and a __devexit function.
 * Use __refdata to avoid this warning.
 */
static struct virtio_driver __refdata virtio_blk = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtblk_probe,
	.remove = __devexit_p(virtblk_remove),
};

static int __init init(void)
{
	major = register_blkdev(0, "virtblk");
	if (major < 0)
		return major;
	return register_virtio_driver(&virtio_blk);
}

static void __exit fini(void)
{
	unregister_blkdev(major, "virtblk");
	unregister_virtio_driver(&virtio_blk);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");