/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */

#define KERNEL_2_6_27

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "include/logging.h"
#include "include/vmbus.h"

#include "include/StorVscApi.h"

//
// #defines
//
#define BLKVSC_MINORS	64

//
// Data types
//
enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

// This group ties the struct request and the struct blkvsc_request/STORVSC_REQUEST together.
// A struct request may be represented by 1 or more struct blkvsc_request.
struct blkvsc_request_group {
	int outstanding;
	int status;

	struct list_head blkvsc_req_list;	// list of blkvsc_requests
};


struct blkvsc_request {
	struct list_head req_entry;	// blkvsc_request_group.blkvsc_req_list

	struct list_head pend_entry;	// block_device_context.pending_list

	struct request *req;		// This may be NULL if we generate a request internally
	struct block_device_context *dev;
	struct blkvsc_request_group *group;	// The group this request is part of. May be NULL

	wait_queue_head_t wevent;
	int cond;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	STORVSC_REQUEST request;
	// !!!DO NOT ADD ANYTHING BELOW HERE!!! The extension buffer falls right
	// after this structure and is pointed to by request.Extension, so any
	// field added below would overlap that memory.
};
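
// For illustration: each object allocated from request_pool is laid out as
//
//	[ struct blkvsc_request | extension buffer of RequestExtSize bytes ]
//
// blkvsc_submit_request() points request.Extension just past the struct,
// which is why the pool objects are sized
// sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize in
// blkvsc_probe(), and why nothing may be appended to struct blkvsc_request.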

// Per device structure
struct block_device_context {
	struct device_context *device_ctx;	// points back to our device context
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

// Per driver
struct blkvsc_driver_context {
	// !! These must be the first 2 fields !!
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};
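
// The ordering above matters because the code casts between the outer and
// inner types; blkvsc_probe(), for example, recovers the driver object with
//
//	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
//	struct blkvsc_driver_context *blkvsc_drv_ctx =
//		(struct blkvsc_driver_context *)driver_ctx;
//
// which is only valid while drv_ctx is the first member of this struct.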

// Static decl
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct inode *inode, struct file *filep);
static int blkvsc_release(struct inode *inode, struct file *filep);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg);

static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(STORVSC_REQUEST *request);
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(STORVSC_REQUEST *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);


static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

// The one and only instance of this driver
static struct blkvsc_driver_context g_blkvsc_drv;


static struct block_device_operations block_ops =
{
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};
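
// Note: the open/release/ioctl prototypes above (taking inode/file) match the
// block_device_operations of the kernels this driver targets; around 2.6.28
// the kernel switched these callbacks to struct block_device/fmode_t based
// signatures, so this table would need adjusting there.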

/*++

Name:	blkvsc_drv_init()

Desc:	BlkVsc driver initialization.

--*/
int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
{
	int ret = 0;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	DPRINT_ENTER(BLKVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	// Callback to client driver to complete the initialization
	pfn_drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID));

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
	drv_ctx->driver.probe = blkvsc_probe;
	drv_ctx->driver.remove = blkvsc_remove;
#else
	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;
#endif

	// The driver belongs to vmbus
	vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}
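
// A minimal usage sketch: module init simply hands the storvsc entry point to
// this routine (see blkvsc_init() at the bottom of this file):
//
//	ret = blkvsc_drv_init(BlkVscInitialize);
//
// BlkVscInitialize then fills in the STORVSC_DRIVER_OBJECT callbacks
// (OnDeviceAdd, OnIORequest, ...) that the rest of this driver invokes.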

static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;
	*curr = dev;
	return 1;	// stop iterating
}

/*++

Name:	blkvsc_drv_exit()

Desc:	BlkVsc driver cleanup; unregisters each device and then the driver itself.

--*/
void blkvsc_drv_exit(void)
{
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	struct device *current_dev = NULL;

#if defined(KERNEL_2_6_5) || defined(KERNEL_2_6_9)
#define driver_for_each_device(drv, start, data, fn) \
	struct list_head *ptr, *n; \
	list_for_each_safe(ptr, n, &((drv)->devices)) {\
		struct device *curr_dev;\
		curr_dev = list_entry(ptr, struct device, driver_list);\
		fn(curr_dev, data);\
	}
#endif // KERNEL_2_6_9
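
	// On 2.6.5/2.6.9 kernels there is no driver_for_each_device() helper, so
	// the macro above open-codes the walk over drv->devices. On newer kernels
	// the real driver-core helper is used, with blkvsc_drv_exit_cb() grabbing
	// one device per pass so it can be unregistered outside the iteration.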

	DPRINT_ENTER(BLKVSC_DRV);

	while (1)
	{
		current_dev = NULL;

		// Get the device
		driver_for_each_device(&drv_ctx->driver, NULL, (void *)&current_dev, blkvsc_drv_exit_cb);

		if (current_dev == NULL)
			break;

		// Initiate removal from the top-down
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return;
}

/*++

Name:	blkvsc_probe()

Desc:	Add a new device for this driver

--*/
static int blkvsc_probe(struct device *device)
{
	int ret = 0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	STORVSC_DEVICE_INFO device_info;
	int major = 0;
	int devnum = 0;

	static int ide0_registered = 0;
	static int ide1_registered = 0;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd)
	{
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");

		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev)
	{
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	// Initialize what we can here
	spin_lock_init(&blkdev->lock);

	ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request));

#ifdef KERNEL_2_6_27
	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
		sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
		SLAB_HWCACHE_ALIGN, NULL);
#else
	blkdev->request_pool = kmem_cache_create(device_ctx->device.bus_id,
		sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0,
		SLAB_HWCACHE_ALIGN, NULL, NULL);
#endif
	if (!blkdev->request_pool)
	{
		ret = -ENOMEM;
		goto Cleanup;
	}


	// Call to the vsc driver to add the device
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0)
	{
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	blkdev->target = device_info.TargetId;	// this identifies the device, 0 or 1
	blkdev->path = device_info.PathId;	// this identifies the IDE controller, 0 or 1

	dev_set_drvdata(device, blkdev);

	// Calculate the major and device num
	if (blkdev->path == 0)
	{
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		// 0 or 1

		if (!ide0_registered)
		{
			ret = register_blkdev(major, "ide");
			if (ret != 0)
			{
				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	}
	else if (blkdev->path == 1)
	{
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1;	// 2 or 3

		if (!ide1_registered)
		{
			ret = register_blkdev(major, "ide");
			if (ret != 0)
			{
				DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret);
				goto Remove;
			}

			ide1_registered = 1;
		}

	}
	else
	{
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd)
	{
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE)
	{
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	}
	else
	{
		blkvsc_do_read_capacity16(blkdev);
	}

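	// set_capacity() takes 512-byte kernel sectors, while blkdev->capacity is
	// counted in device sectors, hence the (sector_size / 512) scaling below.
	// For example, a device reporting 1000 sectors of 4096 bytes is published
	// to the block layer as 1000 * (4096 / 512) = 8000 kernel sectors.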
	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	// go!
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %llu sector_size %d", blkdev->gd->disk_name, blkdev->capacity, blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev)
	{
		if (blkdev->request_pool)
		{
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs)
	{
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);

		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
	{
		return -ENOMEM;
	}

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;

	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;
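
	// SYNCHRONIZE_CACHE (opcode 0x35) is a 10-byte CDB; leaving the LBA and
	// "number of blocks" fields zeroed asks the target to flush its entire
	// write cache, which is all this driver ever needs.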

	// Set this here since the completion routine may be invoked and completed before we return
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

// Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd)
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
	{
		return -ENOMEM;
	}

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf)
	{
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	// EVPD bit - request vital product data
	blkvsc_req->cmnd[2] = 0x83;	// VPD page 0x83 - device identification
	blkvsc_req->cmnd[4] = 64;
	blkvsc_req->cmd_len = 6;
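
	// INQUIRY is a 6-byte CDB: byte 0 opcode, byte 1 bit 0 = EVPD, byte 2 the
	// VPD page code, byte 4 the allocation length (64 here, matching
	// DataBuffer.Length above). Bytes 3 and 5 stay zero.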

	// Set this here since the completion routine may be invoked and completed before we return
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	//PrintBytes(buf, 64);
	// The low 5 bits of byte 0 hold the SCSI peripheral device type
	device_type = buf[0] & 0x1F;

	if (device_type == 0x0)
	{
		blkdev->device_type = HARDDISK_TYPE;
	}
	else if (device_type == 0x5)
	{
		blkdev->device_type = DVD_TYPE;
	}
	else
	{
		// TODO: this is currently an unsupported device type
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	//PrintBytes(blkdev->device_id, blkdev->device_id_len);

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

// Do a scsi READ_CAPACITY cmd here to get the size of the disk
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0;	// assume a disk is present

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
	{
		return -ENOMEM;
	}

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf)
	{
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	// Set this here since the completion routine may be invoked and completed before we return
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	// check error
	if (blkvsc_req->request.Status)
	{
		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A)	// Medium not present
		{
			blkdev->media_not_present = 1;
		}

		// Don't leak the page and request on the early return
		__free_page(page_buf);
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

		return 0;
	}
	buf = kmap(page_buf);

	// be to le
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];

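	// Worked example (illustrative values): READ_CAPACITY returns the last
	// LBA and the block size, both big-endian. If the 8 returned bytes were
	// 00 00 07 FF 00 00 02 00, the last LBA is 0x7FF, so capacity becomes
	// 0x7FF + 1 = 2048 sectors, and sector_size is 0x200 = 512 bytes.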
	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}


static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0;	// assume a disk is present

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
	{
		return -ENOMEM;
	}

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf)
	{
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

	blkvsc_req->cmnd[0] = 0x9E;	// READ CAPACITY(16) (SERVICE ACTION IN)
	blkvsc_req->cmd_len = 16;

	// Set this here since the completion routine may be invoked and completed before we return
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	// check error
	if (blkvsc_req->request.Status)
	{
		scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A)	// Medium not present
		{
			blkdev->media_not_present = 1;
		}

		// Don't leak the page and request on the early return
		__free_page(page_buf);
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

		return 0;
	}
	buf = kmap(page_buf);

	// be to le: READ CAPACITY(16) returns a 64-bit last LBA and a 32-bit block length
	blkdev->capacity = be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/*++

Name:	blkvsc_remove()

Desc:	Callback when our device is removed

--*/
static int blkvsc_remove(struct device *device)
{
	int ret = 0;

	struct driver_context *driver_ctx = driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	DEVICE_OBJECT *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove)
	{
		DPRINT_EXIT(BLKVSC_DRV);
		return -1;
	}

	// Call to the vsc driver to let it know that the device is being removed
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0)
	{
		// TODO:
		DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret);
	}

	// Get to a known state
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs)
	{
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs);

		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	ASSERT(blkvsc_req->req);
	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8));

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff)
	{
		if (rq_data_dir(blkvsc_req->req))
		{
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		}
		else
		{
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count);
	}
	else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff))
	{
		if (rq_data_dir(blkvsc_req->req))
		{
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		}
		else
		{
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count);
	}
	else
	{
		if (rq_data_dir(blkvsc_req->req))
		{
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		}
		else
		{
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}
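
// CDB selection in blkvsc_init_rw(), in short: a 16-byte CDB once the start
// LBA no longer fits in 32 bits; a 10-byte CDB when the sector count exceeds
// 255 or the LBA exceeds 0x1fffff (the 21-bit READ(6)/WRITE(6) limit); a
// 6-byte CDB otherwise. For example, a 128-sector read at LBA 0x200000 takes
// the READ_10 path, while the same read at LBA 0x100000 fits READ_6.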

static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct device_context *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
	int ret = 0;

	STORVSC_REQUEST *storvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %llu count %d offset %d len %d\n",
		blkvsc_req,
		(blkvsc_req->write) ? "WRITE" : "READ",
		blkvsc_req->sector_start,
		blkvsc_req->sector_count,
		blkvsc_req->request.DataBuffer.Offset,
		blkvsc_req->request.DataBuffer.Length);

	/*for (i=0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++)
	{
		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
			blkvsc_req,
			i,
			blkvsc_req->request.DataBuffer.PfnArray[i]);
	}*/

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	// this is not really used at all

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request);
	if (ret == 0)
	{
		blkdev->num_outstanding_reqs++;
	}

	return ret;
}

//
// We break the request into 1 or more blkvsc_requests and submit them.
// If we can't submit them all, we put them on the pending_list. The
// blkvsc_request() will work on the pending_list.
//
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;

	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;

	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %llu\n", blkdev, req, blk_rq_pos(req));

	// Create a group to tie req to list of blkvsc_reqs
	group = (struct blkvsc_request_group *)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
	{
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	// foreach bio in the request
	if (req->bio)
	for (bio = req->bio; bio; bio = bio->bi_next)
	{
		// Map this bio into an existing or new storvsc request
		bio_for_each_segment(bvec, bio, seg_idx)
		{
			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
				req, bio, bvec, seg_idx, databuf_idx);

			// Get a new storvsc request
			if ((!blkvsc_req) ||	// 1st-time
				(databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
				(bvec->bv_offset != 0) ||	// hole at the begin of page
				(prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)))	// hole at the end of page
			{
				// submit the prev one
				if (blkvsc_req)
				{
					blkvsc_req->sector_start = start_sector;
					sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

					blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

					blkvsc_init_rw(blkvsc_req);
				}

				// Create new blkvsc_req to represent the current bvec
				blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req)
				{
					// free up everything
					list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
					{
						list_del(&blkvsc_req->req_entry);
						kmem_cache_free(blkdev->request_pool, blkvsc_req);
					}

					kmem_cache_free(blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0, sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
				blkvsc_req->request.DataBuffer.Length = 0;

				// Add to the group
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			// Add the curr bvec/segment to the curr blkvsc_req
			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} // bio_for_each_segment

	} // rq_for_each_bio

	// Handle the last one
	if (blkvsc_req)
	{
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n", blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry)
	{
		if (pending)
		{
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %llu sect_count %d (%llu %d)\n",
				blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors);

			list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
		}
		else
		{
			ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);
			if (ret == -1)
			{
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %llu sect_count %d (%llu %d) ret %d\n",
				blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, start_sector, num_sectors, ret);
		}
	}

	return pending;
}
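
// Splitting example (illustrative, assuming MAX_MULTIPAGE_BUFFER_COUNT is 32
// and 4 KB pages): a fully page-aligned 512 KB request carries 128 segments.
// Each blkvsc_request can hold at most MAX_MULTIPAGE_BUFFER_COUNT page
// frames, so the request is broken into 128 / 32 = 4 blkvsc_requests, all
// tied to one blkvsc_request_group; the group completes only when all 4 do.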

static void blkvsc_cmd_completion(STORVSC_REQUEST *request)
{
	struct blkvsc_request *blkvsc_req = (struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev = (struct block_device_context *)blkvsc_req->dev;

	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status)
	{
		if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr))
		{
			scsi_print_sense_hdr("blkvsc", &sense_hdr);
		}
	}

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}

static void blkvsc_request_completion(STORVSC_REQUEST *request)
{
	struct blkvsc_request *blkvsc_req = (struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev = (struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %llu sect_count %d len %d group outstd %d total outstd %d\n",
		blkdev,
		blkvsc_req,
		blkvsc_req->group,
		(blkvsc_req->write) ? "WRITE" : "READ",
		blkvsc_req->sector_start,
		blkvsc_req->sector_count,
		blkvsc_req->request.DataBuffer.Length,
		blkvsc_req->group->outstanding,
		blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	// Only start processing when all the blkvsc_reqs are completed. This guarantees no out-of-order
	// blkvsc_req completion when calling end_that_request_first()
	if (blkvsc_req->group->outstanding == 0)
	{
		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
		{
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d\n",
				comp_req,
				comp_req->sector_start,
				comp_req->sector_count);

			list_del(&comp_req->req_entry);

#ifdef KERNEL_2_6_27
			if (!__blk_end_request(
				comp_req->req,
				(!comp_req->request.Status ? 0 : -EIO),
				comp_req->sector_count * blkdev->sector_size))
			{
				// All the sectors have been xferred, ie the request is done
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
				kmem_cache_free(blkdev->request_pool, comp_req->group);
			}
#else
			if (!end_that_request_first(comp_req->req, !comp_req->request.Status, (comp_req->sector_count * (blkdev->sector_size >> 9))))
			{
				// All the sectors have been xferred, ie the request is done
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);

				end_that_request_last(comp_req->req, !comp_req->request.Status);

				kmem_cache_free(blkdev->request_pool, comp_req->group);
			}
#endif

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down)
		{
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;

	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	// Flush the pending list first
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
	{
		// The pend_req could be part of a partially completed request. If so, complete
		// those reqs first until we hit the pend_req
		list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry)
		{
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %llu sect_count %d\n",
				comp_req,
				comp_req->sector_start,
				comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req)
			{
#ifdef KERNEL_2_6_27
				ret = __blk_end_request(
					comp_req->req,
					(!comp_req->request.Status ? 0 : -EIO),
					comp_req->sector_count * blkdev->sector_size);
#else
				ret = end_that_request_first(comp_req->req, !comp_req->request.Status, (comp_req->sector_count * (blkdev->sector_size >> 9)));
#endif
				ASSERT(ret != 0);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (pend_req->req)
		{
#ifdef KERNEL_2_6_27
			if (!__blk_end_request(
				pend_req->req,
				-EIO,
				pend_req->sector_count * blkdev->sector_size))
			{
				// All the sectors have been xferred, ie the request is done
				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);
				kmem_cache_free(blkdev->request_pool, pend_req->group);
			}
#else
			if (!end_that_request_first(pend_req->req, 0, (pend_req->sector_count * (blkdev->sector_size >> 9))))
			{
				// All the sectors have been xferred, ie the request is done
				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req);

				end_that_request_last(pend_req->req, 0);

				kmem_cache_free(blkdev->request_pool, pend_req->group);
			}
#endif
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

	return ret;
}

static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	// Flush the pending list first
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry)
	{
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req);

		ret = blkvsc_submit_request(pend_req, blkvsc_request_completion);
		if (ret != 0)
		{
			break;
		}
		else
		{
			list_del(&pend_req->pend_entry);
		}
	}

	return ret;
}

static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL)
	{
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0)
		{
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0)
		{
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		}
		else if (ret < 0)
		{
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

static int blkvsc_open(struct inode *inode, struct file *filep)
{
	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE)
	{
		spin_unlock(&blkdev->lock);
		check_disk_change(inode->i_bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	return 0;
}

static int blkvsc_release(struct inode *inode, struct file *filep)
{
	struct block_device_context *blkdev = inode->i_bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1)
	{
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	return 0;
}

static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	return 1;
}

static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE)
	{
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}
1419int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
1420{
1421 sector_t total_sectors = get_capacity(bd->bd_disk);
1422 sector_t cylinder_times_heads=0;
1423 sector_t temp=0;
1424
1425 int sectors_per_track=0;
1426 int heads=0;
1427 int cylinders=0;
1428 int rem=0;
1429
1430 if (total_sectors > (65535 * 16 * 255)) {
1431 total_sectors = (65535 * 16 * 255);
1432 }
1433
1434 if (total_sectors >= (65535 * 16 * 63)) {
1435 sectors_per_track = 255;
1436 heads = 16;
1437
1438 cylinder_times_heads = total_sectors;
1439 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1440 }
1441 else
1442 {
1443 sectors_per_track = 17;
1444
1445 cylinder_times_heads = total_sectors;
1446 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1447
1448 temp = cylinder_times_heads + 1023;
1449 rem = sector_div(temp, 1024); // sector_div stores the quotient in temp
1450
1451 heads = temp;
1452
1453 if (heads < 4) {
1454 heads = 4;
1455 }
1456
1457 if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
1458 sectors_per_track = 31;
1459 heads = 16;
1460
1461 cylinder_times_heads = total_sectors;
1462 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1463 }
1464
1465 if (cylinder_times_heads >= (heads * 1024)) {
1466 sectors_per_track = 63;
1467 heads = 16;
1468
1469 cylinder_times_heads = total_sectors;
1470 rem = sector_div(cylinder_times_heads, sectors_per_track); // sector_div stores the quotient in cylinder_times_heads
1471 }
1472 }
1473
1474 temp = cylinder_times_heads;
1475 rem = sector_div(temp, heads); // sector_div stores the quotient in temp
1476 cylinders = temp;
1477
1478 hg->heads = heads;
1479 hg->sectors = sectors_per_track;
1480 hg->cylinders = cylinders;
1481
1482 DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track);
1483
1484 return 0;
1485}
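
// Worked example of the CHS heuristic above, for a 1 GiB disk
// (total_sectors = 2097152): the first pass uses 17 sectors/track, giving
// 2097152 / 17 = 123361 cylinder*heads and heads = (123361 + 1023) / 1024 =
// 121. Since heads > 16, it retries with 31 sectors/track (67650 >= 16*1024),
// and again with 63 sectors/track: 2097152 / 63 = 33288, so cylinders =
// 33288 / 16 = 2080 and the reported geometry is CHS (2080, 16, 63).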

static int blkvsc_ioctl(struct inode *inode, struct file *filep, unsigned cmd, unsigned long arg)
{
	struct block_device *bd = inode->i_bdev;
	struct block_device_context *blkdev = bd->bd_disk->private_data;
	int ret = 0;

	switch (cmd)
	{
	// TODO: HDIO_GET_IDENTITY expects a specific format rather than just
	// a GUID. Commented it out for now.
	/*case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");

		if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len))
		{
			ret = -EFAULT;
		}

		break;*/
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}


MODULE_LICENSE("GPL");

static int __init blkvsc_init(void)
{
	int ret;

	ASSERT(sizeof(sector_t) == 8);	// Make sure CONFIG_LBD is set

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void __exit blkvsc_exit(void)
{
	DPRINT_ENTER(BLKVSC_DRV);

	blkvsc_drv_exit();

	DPRINT_EXIT(BLKVSC_DRV);
}

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
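
// The ring buffer size is read-only through sysfs (S_IRUGO) but can be set at
// load time; a hypothetical invocation, assuming the module is named blkvsc:
//
//	insmod blkvsc.ko blkvsc_ringbuffer_size=<bytes>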

module_init(blkvsc_init);
module_exit(blkvsc_exit);

// eof