/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>
#include "StorVscApi.h"
#define BLKVSC_MINORS	64

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};
/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together.  A struct request may be
 * represented by one or more struct blkvsc_request structures.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* This may be null if we generate a request internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of.  May be null. */
	struct blkvsc_request_group *group;

	wait_queue_head_t wevent;
	int cond;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
	/*
	 * !!!DO NOT ADD ANYTHING BELOW HERE!!!  Otherwise memory can overlap,
	 * because the extension buffer is placed immediately after this
	 * structure and is pointed to by request.Extension.
	 * Which sounds like a horrible idea; who designed this?
	 */
};
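
/*
 * Layout note (inferred from the rest of this file): blkvsc_probe() sizes
 * request_pool objects as sizeof(struct blkvsc_request) + RequestExtSize,
 * and blkvsc_submit_request() points request.Extension at the trailing
 * RequestExtSize bytes.  That trailing area is why nothing may be added
 * after the 'request' member above.
 */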
/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct device_context *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type	device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};
/* Per driver */
struct blkvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	/* FIXME this is a bug! */
	struct driver_context drv_ctx;
	struct storvsc_driver_object drv_obj;
};
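
/*
 * Note on the FIXME above: functions below obtain a struct driver_context *
 * via driver_to_driver_context() and cast it straight to
 * struct blkvsc_driver_context *, which only works while drv_ctx is the
 * first member.  The "first 2 fields" rule is a stand-in for a proper
 * container_of() based lookup.
 */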
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(struct hv_storvsc_request *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

/* The one and only one */
static struct blkvsc_driver_context g_blkvsc_drv;
static struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};
/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	int ret;

	DPRINT_ENTER(BLKVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	/* Callback to client driver to complete the initialization */
	drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
	       sizeof(struct hv_guid));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
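
/*
 * Initialization flow: grab the vmbus channel interface for the storvsc
 * driver object, let the lower-level storvsc code finish setting it up via
 * drv_init() (BlkVscInitialize is passed in from blkvsc_init()), and then
 * register as a vmbus child driver so blkvsc_probe() runs for each matching
 * device the host offers.
 */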
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1; /* stop iterating */
}
static void blkvsc_drv_exit(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	struct device *current_dev;
	int ret;

	DPRINT_ENTER(BLKVSC_DRV);

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv_ctx->driver, NULL,
					     (void *)&current_dev,
					     blkvsc_drv_exit_cb);
		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);
}
/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
	struct driver_context *driver_ctx =
				driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
				(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
				&blkvsc_drv_ctx->drv_obj;
	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	int major = 0;
	int devnum = 0;
	int ret = 0;
	static int ide0_registered;
	static int ide1_registered;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");
		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	ASSERT(sizeof(struct blkvsc_request_group) <=
	       sizeof(struct blkvsc_request));

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
					sizeof(struct blkvsc_request) +
					storvsc_drv_obj->RequestExtSize, 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	/* this identifies the device, 0 or 1 */
	blkdev->target = device_info.TargetId;
	/* this identifies the ide ctrl, 0 or 1 */
	blkdev->path = device_info.PathId;

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		/* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}
			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1;	/* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}
			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}
	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
		ret = -1;
		goto Remove;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);
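	/*
	 * Naming/minor layout: each emulated IDE controller (path 0 or 1)
	 * exposes two devices (target 0 or 1), so devnum 0..3 maps to
	 * hda..hdd.  devnum 1 and 3 are the second device on their
	 * controller and therefore start at first_minor == BLKVSC_MINORS.
	 */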
	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
	}

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
		   blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;
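	/*
	 * SYNCHRONIZE CACHE(10) carries no data, hence the empty DataBuffer
	 * above; it simply asks the backing virtual disk to commit any
	 * cached writes to stable storage before we proceed.
	 */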
	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
/* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkdev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	/* EVPD */
	blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device identification */
	blkvsc_req->cmnd[4] = 64;
	blkvsc_req->cmd_len = 6;
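	/*
	 * This builds a 6-byte INQUIRY CDB with the EVPD bit set and VPD
	 * page 0x83 (device identification) requested, allocation length 64.
	 * Byte 0 of the response still carries the peripheral device type,
	 * which is used below to classify the device; buf[7]/buf[8] are then
	 * treated as the length and start of a device identifier.
	 */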
	/*
	 * Set this here since the completion routine may be invoked and
	 * completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	device_type = buf[0] & 0x1F;

	if (device_type == 0x0) {
		blkdev->device_type = HARDDISK_TYPE;
	} else if (device_type == 0x5) {
		blkdev->device_type = DVD_TYPE;
	} else {
		/* TODO: this is currently unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 *  blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkdev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
	}

	buf = kmap(page_buf);

	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];
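	/*
	 * READ CAPACITY(10) returns the LBA of the last block and the block
	 * size, both big-endian, hence the byte swapping above and the +1
	 * that turns "last LBA" into a block count.  (The CDB built above is
	 * the 10-byte opcode even though cmd_len is set to 16.)
	 */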
	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkdev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16 */
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		if (sense_hdr.asc == 0x3A) {
			/* Medium not present */
			blkdev->media_not_present = 1;
		}
	}

	buf = kmap(page_buf);

	blkdev->capacity = be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);
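	/*
	 * The open-coded byte shifting below duplicates the be64/be32
	 * conversions just done and, as a 32-bit decode of a 64-bit field,
	 * would truncate large disks; it appears to be leftover code from
	 * the READ_CAPACITY(10) path rather than something intentional.
	 */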
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}
/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
	struct driver_context *driver_ctx =
				driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
				(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
				&blkvsc_drv_ctx->drv_obj;
	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;
	int ret;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove) {
		DPRINT_EXIT(BLKVSC_DRV);
		return -1;
	}

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV,
			   "unable to remove blkvsc device (ret %d)", ret);
	}

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	ASSERT(blkvsc_req->req);
	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] =
				cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] =
				cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) ||
		   (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] =
				cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] =
				cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] =
				cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}
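
/*
 * blkvsc_init_rw() above follows the classic SCSI CDB limits: READ/WRITE(6)
 * encodes only a 21-bit LBA and an 8-bit transfer length, READ/WRITE(10) a
 * 32-bit LBA and 16-bit length, and READ/WRITE(16) is used once the start
 * sector no longer fits in 32 bits.  The 0x8 in byte 1 of the 10/16-byte
 * CDBs is the FUA bit, set for requests the block layer marks as
 * forced-unit-access.
 */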
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
		void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct device_context *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx =
			driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
			(struct blkvsc_driver_context *)driver_ctx;
	struct storvsc_driver_object *storvsc_drv_obj =
			&blkvsc_drv_ctx->drv_obj;
	struct hv_storvsc_request *storvsc_req;
	int ret;
	int i;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
		   "req %p type %s start_sector %lu count %ld offset %d "
		   "len %d\n", blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) {
		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - "
			   "req %p pfn[%d] %llx\n",
			   blkvsc_req, i,
			   blkvsc_req->request.DataBuffer.PfnArray[i]);
	}

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
					  sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	 /* this is not really used at all */

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
					   &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}
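
/*
 * num_outstanding_reqs is only bumped for requests the VSC actually
 * accepted; blkvsc_shutdown() and blkvsc_remove() spin until it drains
 * before flushing and tearing the device down, and
 * blkvsc_request_completion()/blkvsc_cmd_completion() decrement it.
 */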
/*
 * We break the request into 1 or more blkvsc_requests and submit
 * them.  If we can't submit them all, we put them on the
 * pending_list.  The blkvsc_request() function will work on the
 * pending_list.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		   (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	for (bio = req->bio; bio; bio = bio->bi_next) {
		/*
		 * Map this bio into an existing or new storvsc request
		 */
		bio_for_each_segment(bvec, bio, seg_idx) {
			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() "
				   "- req %p bio %p bvec %p seg_idx %d "
				   "databuf_idx %d\n", req, bio, bvec,
				   seg_idx, databuf_idx);

			/* Get a new storvsc request */
			if ((blkvsc_req == NULL) ||
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
			    /* hole at the begin of page */
			    || (bvec->bv_offset != 0) ||
			    /* hole at the end of page */
			    (prev_bvec &&
			     (prev_bvec->bv_len != PAGE_SIZE))) {
				/* submit the prev one */
				if (blkvsc_req) {
					blkvsc_req->sector_start = start_sector;
					sector_div(blkvsc_req->sector_start,
						   (blkdev->sector_size >> 9));

					blkvsc_req->sector_count = num_sectors /
						(blkdev->sector_size >> 9);
					blkvsc_init_rw(blkvsc_req);
				}

				/*
				 * Create new blkvsc_req to represent
				 * the current bvec
				 */
				blkvsc_req = kmem_cache_alloc(
						blkdev->request_pool,
						GFP_ATOMIC);
				if (!blkvsc_req) {
					/* free up everything */
					list_for_each_entry_safe(
						blkvsc_req, tmp,
						&group->blkvsc_req_list,
						req_entry) {
						list_del(&blkvsc_req->req_entry);
						kmem_cache_free(
							blkdev->request_pool,
							blkvsc_req);
					}

					kmem_cache_free(blkdev->request_pool,
							group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0,
				       sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.DataBuffer.Offset =
					bvec->bv_offset;
				blkvsc_req->request.DataBuffer.Length = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry,
					      &blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/* Add the curr bvec/segment to the curr blkvsc_req */
			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] =
				page_to_pfn(bvec->bv_page);
			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* rq_for_each_bio */
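	/*
	 * A new blkvsc_request is started whenever the current one is full
	 * (MAX_MULTIPAGE_BUFFER_COUNT pages) or the next segment would make
	 * the buffer non-contiguous: a segment that does not start at offset
	 * 0, or one following a segment that did not fill its page, forces a
	 * break, because the multipage DataBuffer can only describe whole,
	 * back-to-back pages plus a single leading offset.
	 */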
	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
					   (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to "
				   "pending_list - blkvsc_req %p start_sect %lu"
				   " sect_count %ld (%lu %ld)\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p "
				   "start_sect %lu sect_count %ld (%lu %ld) "
				   "ret %d\n", blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors, ret);
		}
	}

	return pending;
}
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
		   blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s "
		   "sect_start %lu sect_count %ld len %d group outstd %d "
		   "total outstd %d\n",
		   blkdev, blkvsc_req, blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

			if (!__blk_end_request(comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
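
/*
 * Completion is deferred until every blkvsc_request in the group has come
 * back; only then is each piece fed to __blk_end_request() in list order.
 * That keeps the block layer completing the struct request sequentially even
 * though the storvsc pieces may finish out of order, and the group itself is
 * freed only when the last piece ends the request.
 */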
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those req first until we
		 * hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p "
				   "sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				ret = __blk_end_request(comp_req->req,
					(!comp_req->request.Status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
			   pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/*
				 * All the sectors have been xferred ie the
				 * request is done
				 */
				DPRINT_DBG(BLKVSC_DRV,
					   "blkvsc_cancel_pending_reqs() - "
					   "req %p COMPLETED\n", pend_req->req);
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

	return ret;
}
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
			   pend_req);

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}
static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || !blk_fs_request(req) ||
		    blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV,
				   "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}
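
/*
 * Queue strategy: new requests are only started once the pending list has
 * been drained; if the pending list cannot be flushed, or
 * blkvsc_do_request() reports that it could not submit everything, the
 * queue is stopped and is restarted from blkvsc_request_completion() once
 * outstanding work finishes.
 */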
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	return 0;
}
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	return 0;
}
static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	return 1;
}
static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity *
			     (blkdev->sector_size/512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			/*
			 * sector_div stores the quotient in
			 * cylinder_times_heads
			 */
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}
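
/*
 * The geometry reported above is the traditional BIOS-style fake CHS:
 * capacity is clamped to 65535 x 16 x 255 sectors, sectors-per-track and
 * head counts are stepped up (17, 31 or 63 sectors, up to 16 heads) until
 * the cylinder count fits, and cylinders = total / (heads * sectors).  This
 * only matters to tools that still ask for a geometry, such as fdisk.
 */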
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
	/* struct block_device_context *blkdev = bd->bd_disk->private_data; */
	int ret;

	switch (cmd) {
	/*
	 * TODO: I think there is certain format for HDIO_GET_IDENTITY rather
	 * than just a GUID. Commented it out for now.
	 */
#if 0
	case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");
		if (copy_to_user((void __user *)argument, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
#endif
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static int __init blkvsc_init(void)
{
	int ret;

	ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void __exit blkvsc_exit(void)
{
	DPRINT_ENTER(BLKVSC_DRV);
	blkvsc_drv_exit();
	DPRINT_EXIT(BLKVSC_DRV);
}

MODULE_LICENSE("GPL");
module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
module_init(blkvsc_init);
module_exit(blkvsc_exit);