]>
Commit | Line | Data |
---|---|---|
f82bd046 HJ |
1 | /* |
2 | * | |
3 | * Copyright (c) 2009, Microsoft Corporation. | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms and conditions of the GNU General Public License, | |
7 | * version 2, as published by the Free Software Foundation. | |
8 | * | |
9 | * This program is distributed in the hope it will be useful, but WITHOUT | |
10 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
11 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
12 | * more details. | |
13 | * | |
14 | * You should have received a copy of the GNU General Public License along with | |
15 | * this program; if not, write to the Free Software Foundation, Inc., 59 Temple | |
16 | * Place - Suite 330, Boston, MA 02111-1307 USA. | |
17 | * | |
18 | * Authors: | |
19 | * Hank Janssen <hjanssen@microsoft.com> | |
20 | * | |
21 | */ | |
22 | ||
f82bd046 HJ |
23 | #include <linux/init.h> |
24 | #include <linux/module.h> | |
25 | #include <linux/device.h> | |
26 | #include <linux/blkdev.h> | |
27 | #include <linux/major.h> | |
28 | #include <linux/delay.h> | |
29 | #include <linux/hdreg.h> | |
30 | ||
31 | #include <scsi/scsi.h> | |
32 | #include <scsi/scsi_cmnd.h> | |
33 | #include <scsi/scsi_eh.h> | |
34 | #include <scsi/scsi_dbg.h> | |
35 | ||
0fce4c2f GKH |
36 | #include "include/logging.h" |
37 | #include "include/vmbus.h" | |
f82bd046 | 38 | |
0fce4c2f | 39 | #include "include/StorVscApi.h" |
f82bd046 | 40 | |
454f18a9 BP |
41 | |
42 | /* #defines */ | |
43 | ||
f82bd046 HJ |
44 | #define BLKVSC_MINORS 64 |
45 | ||
454f18a9 BP |
46 | |
47 | /* Data types */ | |
48 | ||
f82bd046 HJ |
49 | enum blkvsc_device_type { |
50 | UNKNOWN_DEV_TYPE, | |
51 | HARDDISK_TYPE, | |
52 | DVD_TYPE, | |
53 | }; | |
54 | ||
454f18a9 BP |
55 | /* |
56 | * This request ties the struct request and struct | |
57 | * blkvsc_request/STORVSC_REQUEST together A struct request may be | |
58 | * represented by 1 or more struct blkvsc_request | |
59 | */ | |
f82bd046 HJ |
60 | struct blkvsc_request_group { |
61 | int outstanding; | |
62 | int status; | |
63 | ||
454f18a9 | 64 | struct list_head blkvsc_req_list; /* list of blkvsc_requests */ |
f82bd046 HJ |
65 | }; |
66 | ||
67 | ||
68 | struct blkvsc_request { | |
454f18a9 | 69 | struct list_head req_entry; /* blkvsc_request_group.blkvsc_req_list */ |
f82bd046 | 70 | |
454f18a9 | 71 | struct list_head pend_entry; /* block_device_context.pending_list */ |
f82bd046 | 72 | |
454f18a9 | 73 | struct request *req; /* This may be null if we generate a request internally */ |
f82bd046 | 74 | struct block_device_context *dev; |
454f18a9 | 75 | struct blkvsc_request_group *group; /* The group this request is part of. Maybe null */ |
f82bd046 HJ |
76 | |
77 | wait_queue_head_t wevent; | |
78 | int cond; | |
79 | ||
80 | int write; | |
81 | sector_t sector_start; | |
82 | unsigned long sector_count; | |
83 | ||
84 | unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE]; | |
85 | unsigned char cmd_len; | |
86 | unsigned char cmnd[MAX_COMMAND_SIZE]; | |
87 | ||
88 | STORVSC_REQUEST request; | |
454f18a9 BP |
89 | /* !!!DO NOT ADD ANYTHING BELOW HERE!!! Otherwise, memory can overlap, because - */ |
90 | /* The extension buffer falls right here and is pointed to by request.Extension; */ | |
f82bd046 HJ |
91 | }; |
92 | ||
454f18a9 | 93 | /* Per device structure */ |
f82bd046 | 94 | struct block_device_context { |
454f18a9 | 95 | struct device_context *device_ctx; /* point back to our device context */ |
f82bd046 HJ |
96 | struct kmem_cache *request_pool; |
97 | spinlock_t lock; | |
98 | struct gendisk *gd; | |
99 | enum blkvsc_device_type device_type; | |
100 | struct list_head pending_list; | |
101 | ||
102 | unsigned char device_id[64]; | |
103 | unsigned int device_id_len; | |
104 | int num_outstanding_reqs; | |
105 | int shutting_down; | |
106 | int media_not_present; | |
107 | unsigned int sector_size; | |
108 | sector_t capacity; | |
109 | unsigned int port; | |
110 | unsigned char path; | |
111 | unsigned char target; | |
112 | int users; | |
113 | }; | |
114 | ||
454f18a9 | 115 | /* Per driver */ |
f82bd046 | 116 | struct blkvsc_driver_context { |
454f18a9 | 117 | /* !! These must be the first 2 fields !! */ |
f82bd046 HJ |
118 | struct driver_context drv_ctx; |
119 | STORVSC_DRIVER_OBJECT drv_obj; | |
120 | }; | |
121 | ||
454f18a9 | 122 | /* Static decl */ |
f82bd046 HJ |
123 | static int blkvsc_probe(struct device *dev); |
124 | static int blkvsc_remove(struct device *device); | |
125 | static void blkvsc_shutdown(struct device *device); | |
126 | ||
39635f7d | 127 | static int blkvsc_open(struct block_device *bdev, fmode_t mode); |
77d2d9da | 128 | static int blkvsc_release(struct gendisk *disk, fmode_t mode); |
f82bd046 HJ |
129 | static int blkvsc_media_changed(struct gendisk *gd); |
130 | static int blkvsc_revalidate_disk(struct gendisk *gd); | |
131 | static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg); | |
dfe8b2d9 BP |
132 | static int blkvsc_ioctl(struct block_device *bd, fmode_t mode, |
133 | unsigned cmd, unsigned long argument); | |
f82bd046 HJ |
134 | static void blkvsc_request(struct request_queue *queue); |
135 | static void blkvsc_request_completion(STORVSC_REQUEST* request); | |
136 | static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req); | |
137 | static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*) ); | |
138 | static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req); | |
139 | static void blkvsc_cmd_completion(STORVSC_REQUEST* request); | |
140 | static int blkvsc_do_inquiry(struct block_device_context *blkdev); | |
141 | static int blkvsc_do_read_capacity(struct block_device_context *blkdev); | |
142 | static int blkvsc_do_read_capacity16(struct block_device_context *blkdev); | |
143 | static int blkvsc_do_flush(struct block_device_context *blkdev); | |
144 | static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev); | |
145 | static int blkvsc_do_pending_reqs(struct block_device_context *blkdev); | |
146 | ||
147 | ||
148 | static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE; | |
149 | ||
454f18a9 | 150 | /* The one and only one */ |
f82bd046 HJ |
151 | static struct blkvsc_driver_context g_blkvsc_drv; |
152 | ||
153 | ||
154 | static struct block_device_operations block_ops = | |
155 | { | |
156 | .owner = THIS_MODULE, | |
157 | .open = blkvsc_open, | |
158 | .release = blkvsc_release, | |
159 | .media_changed = blkvsc_media_changed, | |
160 | .revalidate_disk = blkvsc_revalidate_disk, | |
161 | .getgeo = blkvsc_getgeo, | |
162 | .ioctl = blkvsc_ioctl, | |
163 | }; | |
164 | ||
165 | /*++ | |
166 | ||
167 | Name: blkvsc_drv_init() | |
168 | ||
169 | Desc: BlkVsc driver initialization. | |
170 | ||
171 | --*/ | |
172 | int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init) | |
173 | { | |
174 | int ret=0; | |
175 | STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_blkvsc_drv.drv_obj; | |
176 | struct driver_context *drv_ctx=&g_blkvsc_drv.drv_ctx; | |
177 | ||
178 | DPRINT_ENTER(BLKVSC_DRV); | |
179 | ||
180 | vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface); | |
181 | ||
182 | storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size; | |
183 | ||
454f18a9 | 184 | /* Callback to client driver to complete the initialization */ |
f82bd046 HJ |
185 | pfn_drv_init(&storvsc_drv_obj->Base); |
186 | ||
187 | drv_ctx->driver.name = storvsc_drv_obj->Base.name; | |
188 | memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType, sizeof(GUID)); | |
189 | ||
f82bd046 HJ |
190 | drv_ctx->probe = blkvsc_probe; |
191 | drv_ctx->remove = blkvsc_remove; | |
192 | drv_ctx->shutdown = blkvsc_shutdown; | |
f82bd046 | 193 | |
454f18a9 | 194 | /* The driver belongs to vmbus */ |
5d48a1c2 | 195 | ret = vmbus_child_driver_register(drv_ctx); |
f82bd046 HJ |
196 | |
197 | DPRINT_EXIT(BLKVSC_DRV); | |
198 | ||
199 | return ret; | |
200 | } | |
201 | ||
202 | ||
203 | static int blkvsc_drv_exit_cb(struct device *dev, void *data) | |
204 | { | |
205 | struct device **curr = (struct device **)data; | |
206 | *curr = dev; | |
454f18a9 | 207 | return 1; /* stop iterating */ |
f82bd046 HJ |
208 | } |
209 | ||
210 | /*++ | |
211 | ||
212 | Name: blkvsc_drv_exit() | |
213 | ||
214 | Desc: | |
215 | ||
216 | --*/ | |
217 | void blkvsc_drv_exit(void) | |
218 | { | |
219 | STORVSC_DRIVER_OBJECT *storvsc_drv_obj=&g_blkvsc_drv.drv_obj; | |
220 | struct driver_context *drv_ctx=&g_blkvsc_drv.drv_ctx; | |
221 | ||
222 | struct device *current_dev=NULL; | |
223 | ||
f82bd046 HJ |
224 | DPRINT_ENTER(BLKVSC_DRV); |
225 | ||
226 | while (1) | |
227 | { | |
228 | current_dev = NULL; | |
229 | ||
454f18a9 | 230 | /* Get the device */ |
f82bd046 HJ |
231 | driver_for_each_device(&drv_ctx->driver, NULL, (void*)¤t_dev, blkvsc_drv_exit_cb); |
232 | ||
233 | if (current_dev == NULL) | |
234 | break; | |
235 | ||
454f18a9 | 236 | /* Initiate removal from the top-down */ |
f82bd046 HJ |
237 | device_unregister(current_dev); |
238 | } | |
239 | ||
240 | if (storvsc_drv_obj->Base.OnCleanup) | |
241 | storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base); | |
242 | ||
243 | vmbus_child_driver_unregister(drv_ctx); | |
244 | ||
245 | DPRINT_EXIT(BLKVSC_DRV); | |
246 | ||
247 | return; | |
248 | } | |
249 | ||
250 | /*++ | |
251 | ||
252 | Name: blkvsc_probe() | |
253 | ||
254 | Desc: Add a new device for this driver | |
255 | ||
256 | --*/ | |
257 | static int blkvsc_probe(struct device *device) | |
258 | { | |
259 | int ret=0; | |
260 | ||
261 | struct driver_context *driver_ctx = driver_to_driver_context(device->driver); | |
262 | struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx; | |
263 | STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj; | |
264 | ||
265 | struct device_context *device_ctx = device_to_device_context(device); | |
3d3b5518 | 266 | struct hv_device *device_obj = &device_ctx->device_obj; |
f82bd046 HJ |
267 | |
268 | struct block_device_context *blkdev=NULL; | |
269 | STORVSC_DEVICE_INFO device_info; | |
270 | int major=0; | |
271 | int devnum=0; | |
272 | ||
273 | static int ide0_registered=0; | |
274 | static int ide1_registered=0; | |
275 | ||
276 | DPRINT_ENTER(BLKVSC_DRV); | |
277 | ||
278 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter"); | |
279 | ||
280 | if (!storvsc_drv_obj->Base.OnDeviceAdd) | |
281 | { | |
282 | DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set"); | |
283 | ||
284 | ret = -1; | |
285 | goto Cleanup; | |
286 | } | |
287 | ||
288 | blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL); | |
289 | if (!blkdev) | |
290 | { | |
291 | ret = -ENOMEM; | |
292 | goto Cleanup; | |
293 | } | |
294 | ||
295 | INIT_LIST_HEAD(&blkdev->pending_list); | |
296 | ||
454f18a9 | 297 | /* Initialize what we can here */ |
f82bd046 HJ |
298 | spin_lock_init(&blkdev->lock); |
299 | ||
300 | ASSERT(sizeof(struct blkvsc_request_group) <= sizeof(struct blkvsc_request)); | |
301 | ||
454f18a9 BP |
302 | blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device), |
303 | sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize, 0, | |
304 | SLAB_HWCACHE_ALIGN, NULL); | |
f82bd046 HJ |
305 | if (!blkdev->request_pool) |
306 | { | |
307 | ret = -ENOMEM; | |
308 | goto Cleanup; | |
309 | } | |
310 | ||
311 | ||
454f18a9 | 312 | /* Call to the vsc driver to add the device */ |
f82bd046 HJ |
313 | ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info); |
314 | if (ret != 0) | |
315 | { | |
316 | DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device"); | |
317 | goto Cleanup; | |
318 | } | |
319 | ||
320 | blkdev->device_ctx = device_ctx; | |
454f18a9 BP |
321 | blkdev->target = device_info.TargetId; /* this identified the device 0 or 1 */ |
322 | blkdev->path = device_info.PathId; /* this identified the ide ctrl 0 or 1 */ | |
f82bd046 | 323 | |
b57a68dc | 324 | dev_set_drvdata(device, blkdev); |
f82bd046 | 325 | |
454f18a9 | 326 | /* Calculate the major and device num */ |
f82bd046 HJ |
327 | if (blkdev->path == 0) |
328 | { | |
329 | major = IDE0_MAJOR; | |
454f18a9 | 330 | devnum = blkdev->path + blkdev->target; /* 0 or 1 */ |
f82bd046 HJ |
331 | |
332 | if (!ide0_registered) | |
333 | { | |
334 | ret = register_blkdev(major, "ide"); | |
335 | if (ret != 0) | |
336 | { | |
337 | DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret); | |
338 | goto Remove; | |
339 | } | |
340 | ||
341 | ide0_registered = 1; | |
342 | } | |
343 | } | |
344 | else if (blkdev->path == 1) | |
345 | { | |
346 | major = IDE1_MAJOR; | |
454f18a9 | 347 | devnum = blkdev->path + blkdev->target + 1; /* 2 or 3 */ |
f82bd046 HJ |
348 | |
349 | if (!ide1_registered) | |
350 | { | |
351 | ret = register_blkdev(major, "ide"); | |
352 | if (ret != 0) | |
353 | { | |
354 | DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret); | |
355 | goto Remove; | |
356 | } | |
357 | ||
358 | ide1_registered = 1; | |
359 | } | |
360 | ||
361 | } | |
362 | else | |
363 | { | |
364 | DPRINT_ERR(BLKVSC_DRV, "invalid pathid"); | |
365 | ret = -1; | |
366 | goto Cleanup; | |
367 | } | |
368 | ||
369 | DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major); | |
370 | ||
371 | blkdev->gd = alloc_disk(BLKVSC_MINORS); | |
372 | if (!blkdev->gd) | |
373 | { | |
374 | DPRINT_ERR(BLKVSC_DRV, "register_blkdev() failed! ret %d", ret); | |
375 | ret = -1; | |
376 | goto Cleanup; | |
377 | } | |
378 | ||
379 | blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock); | |
380 | ||
381 | blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE); | |
382 | blk_queue_max_phys_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT); | |
383 | blk_queue_max_hw_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT); | |
384 | blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1); | |
385 | blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY); | |
386 | blk_queue_dma_alignment(blkdev->gd->queue, 511); | |
387 | ||
388 | blkdev->gd->major = major; | |
389 | if (devnum == 1 || devnum == 3) | |
390 | blkdev->gd->first_minor = BLKVSC_MINORS; | |
391 | else | |
392 | blkdev->gd->first_minor = 0; | |
393 | blkdev->gd->fops = &block_ops; | |
394 | blkdev->gd->private_data = blkdev; | |
395 | sprintf(blkdev->gd->disk_name, "hd%c", 'a'+ devnum); | |
396 | ||
397 | blkvsc_do_inquiry(blkdev); | |
398 | if (blkdev->device_type == DVD_TYPE) | |
399 | { | |
400 | set_disk_ro(blkdev->gd, 1); | |
401 | blkdev->gd->flags |= GENHD_FL_REMOVABLE; | |
402 | blkvsc_do_read_capacity(blkdev); | |
403 | } | |
404 | else | |
405 | { | |
406 | blkvsc_do_read_capacity16(blkdev); | |
407 | } | |
408 | ||
409 | set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512)); | |
0fce4c2f | 410 | blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size); |
454f18a9 | 411 | /* go! */ |
f82bd046 HJ |
412 | add_disk(blkdev->gd); |
413 | ||
627c156d | 414 | DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d", blkdev->gd->disk_name, (unsigned long) blkdev->capacity, blkdev->sector_size); |
f82bd046 HJ |
415 | |
416 | return ret; | |
417 | ||
418 | Remove: | |
419 | storvsc_drv_obj->Base.OnDeviceRemove(device_obj); | |
420 | ||
421 | Cleanup: | |
422 | if (blkdev) | |
423 | { | |
424 | if (blkdev->request_pool) | |
425 | { | |
426 | kmem_cache_destroy(blkdev->request_pool); | |
427 | blkdev->request_pool = NULL; | |
428 | } | |
429 | kfree(blkdev); | |
430 | blkdev = NULL; | |
431 | } | |
432 | ||
433 | DPRINT_EXIT(BLKVSC_DRV); | |
434 | ||
435 | return ret; | |
436 | } | |
437 | ||
438 | static void blkvsc_shutdown(struct device *device) | |
439 | { | |
b57a68dc | 440 | struct block_device_context *blkdev = dev_get_drvdata(device); |
f82bd046 HJ |
441 | unsigned long flags; |
442 | ||
443 | if (!blkdev) | |
444 | return; | |
445 | ||
446 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n", blkdev->users, blkdev->gd->disk_name); | |
447 | ||
448 | spin_lock_irqsave(&blkdev->lock, flags); | |
449 | ||
450 | blkdev->shutting_down = 1; | |
451 | ||
452 | blk_stop_queue(blkdev->gd->queue); | |
453 | ||
454 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
455 | ||
456 | while (blkdev->num_outstanding_reqs) | |
457 | { | |
458 | DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs); | |
459 | ||
460 | udelay(100); | |
461 | } | |
462 | ||
463 | blkvsc_do_flush(blkdev); | |
464 | ||
465 | spin_lock_irqsave(&blkdev->lock, flags); | |
466 | ||
467 | blkvsc_cancel_pending_reqs(blkdev); | |
468 | ||
469 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
470 | } | |
471 | ||
472 | static int blkvsc_do_flush(struct block_device_context *blkdev) | |
473 | { | |
474 | struct blkvsc_request *blkvsc_req=NULL; | |
475 | ||
476 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n"); | |
477 | ||
478 | if (blkdev->device_type != HARDDISK_TYPE) | |
479 | return 0; | |
480 | ||
481 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
482 | if (!blkvsc_req) | |
483 | { | |
484 | return -ENOMEM; | |
485 | } | |
486 | ||
487 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
488 | init_waitqueue_head(&blkvsc_req->wevent); | |
489 | blkvsc_req->dev = blkdev; | |
490 | blkvsc_req->req = NULL; | |
491 | blkvsc_req->write = 0; | |
492 | ||
493 | blkvsc_req->request.DataBuffer.PfnArray[0] = 0; | |
494 | blkvsc_req->request.DataBuffer.Offset = 0; | |
495 | blkvsc_req->request.DataBuffer.Length = 0; | |
496 | ||
497 | blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE; | |
498 | blkvsc_req->cmd_len = 10; | |
499 | ||
454f18a9 | 500 | /* Set this here since the completion routine may be invoked and completed before we return */ |
f82bd046 HJ |
501 | blkvsc_req->cond =0; |
502 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
503 | ||
504 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
505 | ||
506 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
507 | ||
508 | return 0; | |
509 | } | |
510 | ||
454f18a9 | 511 | /* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */ |
f82bd046 HJ |
512 | static int blkvsc_do_inquiry(struct block_device_context *blkdev) |
513 | { | |
514 | struct blkvsc_request *blkvsc_req=NULL; | |
515 | struct page *page_buf; | |
516 | unsigned char *buf; | |
517 | unsigned char device_type; | |
518 | ||
519 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n"); | |
520 | ||
521 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
522 | if (!blkvsc_req) | |
523 | { | |
524 | return -ENOMEM; | |
525 | } | |
526 | ||
527 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
528 | page_buf = alloc_page(GFP_KERNEL); | |
529 | if (!page_buf) | |
530 | { | |
531 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
532 | return -ENOMEM; | |
533 | } | |
534 | ||
535 | init_waitqueue_head(&blkvsc_req->wevent); | |
536 | blkvsc_req->dev = blkdev; | |
537 | blkvsc_req->req = NULL; | |
538 | blkvsc_req->write = 0; | |
539 | ||
540 | blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf); | |
541 | blkvsc_req->request.DataBuffer.Offset = 0; | |
542 | blkvsc_req->request.DataBuffer.Length = 64; | |
543 | ||
544 | blkvsc_req->cmnd[0] = INQUIRY; | |
454f18a9 BP |
545 | blkvsc_req->cmnd[1] = 0x1; /* Get product data */ |
546 | blkvsc_req->cmnd[2] = 0x83; /* mode page 83 */ | |
f82bd046 HJ |
547 | blkvsc_req->cmnd[4] = 64; |
548 | blkvsc_req->cmd_len = 6; | |
549 | ||
454f18a9 | 550 | /* Set this here since the completion routine may be invoked and completed before we return */ |
f82bd046 HJ |
551 | blkvsc_req->cond =0; |
552 | ||
553 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
554 | ||
555 | DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond); | |
556 | ||
557 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
558 | ||
559 | buf = kmap(page_buf); | |
560 | ||
04f50c4d | 561 | /* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */ |
454f18a9 | 562 | /* be to le */ |
f82bd046 HJ |
563 | device_type = buf[0] & 0x1F; |
564 | ||
565 | if (device_type == 0x0) | |
566 | { | |
567 | blkdev->device_type = HARDDISK_TYPE; | |
568 | } | |
569 | else if (device_type == 0x5) | |
570 | { | |
571 | blkdev->device_type = DVD_TYPE; | |
572 | } | |
573 | else | |
574 | { | |
454f18a9 | 575 | /* TODO: this is currently unsupported device type */ |
f82bd046 HJ |
576 | blkdev->device_type = UNKNOWN_DEV_TYPE; |
577 | } | |
578 | ||
579 | DPRINT_DBG(BLKVSC_DRV, "device type %d \n", device_type); | |
580 | ||
581 | blkdev->device_id_len = buf[7]; | |
582 | if (blkdev->device_id_len > 64) | |
583 | blkdev->device_id_len = 64; | |
584 | ||
585 | memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len); | |
04f50c4d | 586 | /* printk_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id, |
454f18a9 | 587 | * blkdev->device_id_len); */ |
f82bd046 HJ |
588 | |
589 | kunmap(page_buf); | |
590 | ||
591 | __free_page(page_buf); | |
592 | ||
593 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
594 | ||
595 | return 0; | |
596 | } | |
597 | ||
454f18a9 | 598 | /* Do a scsi READ_CAPACITY cmd here to get the size of the disk */ |
f82bd046 HJ |
599 | static int blkvsc_do_read_capacity(struct block_device_context *blkdev) |
600 | { | |
601 | struct blkvsc_request *blkvsc_req=NULL; | |
602 | struct page *page_buf; | |
603 | unsigned char *buf; | |
604 | struct scsi_sense_hdr sense_hdr; | |
605 | ||
606 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n"); | |
607 | ||
608 | blkdev->sector_size = 0; | |
609 | blkdev->capacity = 0; | |
454f18a9 | 610 | blkdev->media_not_present = 0; /* assume a disk is present */ |
f82bd046 HJ |
611 | |
612 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
613 | if (!blkvsc_req) | |
614 | { | |
615 | return -ENOMEM; | |
616 | } | |
617 | ||
618 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
619 | page_buf = alloc_page(GFP_KERNEL); | |
620 | if (!page_buf) | |
621 | { | |
622 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
623 | return -ENOMEM; | |
624 | } | |
625 | ||
626 | init_waitqueue_head(&blkvsc_req->wevent); | |
627 | blkvsc_req->dev = blkdev; | |
628 | blkvsc_req->req = NULL; | |
629 | blkvsc_req->write = 0; | |
630 | ||
631 | blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf); | |
632 | blkvsc_req->request.DataBuffer.Offset = 0; | |
633 | blkvsc_req->request.DataBuffer.Length = 8; | |
634 | ||
635 | blkvsc_req->cmnd[0] = READ_CAPACITY; | |
636 | blkvsc_req->cmd_len = 16; | |
637 | ||
454f18a9 BP |
638 | /* |
639 | * Set this here since the completion routine may be invoked | |
640 | * and completed before we return | |
641 | */ | |
f82bd046 HJ |
642 | blkvsc_req->cond =0; |
643 | ||
644 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
645 | ||
646 | DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond); | |
647 | ||
648 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
649 | ||
454f18a9 | 650 | /* check error */ |
f82bd046 HJ |
651 | if (blkvsc_req->request.Status) |
652 | { | |
653 | scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr); | |
654 | ||
454f18a9 | 655 | if (sense_hdr.asc == 0x3A) /* Medium not present */ |
f82bd046 HJ |
656 | { |
657 | blkdev->media_not_present = 1; | |
658 | } | |
659 | ||
660 | return 0; | |
661 | } | |
662 | buf = kmap(page_buf); | |
663 | ||
454f18a9 | 664 | /* be to le */ |
f82bd046 HJ |
665 | blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1; |
666 | blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]; | |
667 | ||
668 | kunmap(page_buf); | |
669 | ||
670 | __free_page(page_buf); | |
671 | ||
672 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
673 | ||
674 | return 0; | |
675 | } | |
676 | ||
677 | ||
678 | static int blkvsc_do_read_capacity16(struct block_device_context *blkdev) | |
679 | { | |
680 | struct blkvsc_request *blkvsc_req=NULL; | |
681 | struct page *page_buf; | |
682 | unsigned char *buf; | |
683 | struct scsi_sense_hdr sense_hdr; | |
684 | ||
685 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n"); | |
686 | ||
687 | blkdev->sector_size = 0; | |
688 | blkdev->capacity = 0; | |
454f18a9 | 689 | blkdev->media_not_present = 0; /* assume a disk is present */ |
f82bd046 HJ |
690 | |
691 | blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL); | |
692 | if (!blkvsc_req) | |
693 | { | |
694 | return -ENOMEM; | |
695 | } | |
696 | ||
697 | memset(blkvsc_req, 0, sizeof(struct blkvsc_request)); | |
698 | page_buf = alloc_page(GFP_KERNEL); | |
699 | if (!page_buf) | |
700 | { | |
701 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
702 | return -ENOMEM; | |
703 | } | |
704 | ||
705 | init_waitqueue_head(&blkvsc_req->wevent); | |
706 | blkvsc_req->dev = blkdev; | |
707 | blkvsc_req->req = NULL; | |
708 | blkvsc_req->write = 0; | |
709 | ||
710 | blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf); | |
711 | blkvsc_req->request.DataBuffer.Offset = 0; | |
712 | blkvsc_req->request.DataBuffer.Length = 12; | |
713 | ||
454f18a9 | 714 | blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16; */ |
f82bd046 HJ |
715 | blkvsc_req->cmd_len = 16; |
716 | ||
454f18a9 BP |
717 | /* |
718 | * Set this here since the completion routine may be invoked | |
719 | * and completed before we return | |
720 | */ | |
f82bd046 HJ |
721 | blkvsc_req->cond =0; |
722 | ||
723 | blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion); | |
724 | ||
725 | DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n", blkvsc_req, blkvsc_req->cond); | |
726 | ||
727 | wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond); | |
728 | ||
454f18a9 | 729 | /* check error */ |
f82bd046 HJ |
730 | if (blkvsc_req->request.Status) |
731 | { | |
732 | scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr); | |
733 | ||
454f18a9 | 734 | if (sense_hdr.asc == 0x3A) /* Medium not present */ |
f82bd046 HJ |
735 | { |
736 | blkdev->media_not_present = 1; | |
737 | } | |
738 | ||
739 | return 0; | |
740 | } | |
741 | buf = kmap(page_buf); | |
742 | ||
454f18a9 | 743 | /* be to le */ |
f82bd046 HJ |
744 | blkdev->capacity = be64_to_cpu(*(unsigned long long*) &buf[0]) + 1; |
745 | blkdev->sector_size = be32_to_cpu(*(unsigned int*)&buf[8]); | |
746 | ||
454f18a9 BP |
747 | /* blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) | (buf[2] << 8) | buf[3]) + 1; */ |
748 | /* blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7]; */ | |
f82bd046 HJ |
749 | |
750 | kunmap(page_buf); | |
751 | ||
752 | __free_page(page_buf); | |
753 | ||
754 | kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req); | |
755 | ||
756 | return 0; | |
757 | } | |
758 | ||
759 | /*++ | |
760 | ||
761 | Name: blkvsc_remove() | |
762 | ||
763 | Desc: Callback when our device is removed | |
764 | ||
765 | --*/ | |
766 | static int blkvsc_remove(struct device *device) | |
767 | { | |
768 | int ret=0; | |
769 | ||
770 | struct driver_context *driver_ctx = driver_to_driver_context(device->driver); | |
771 | struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx; | |
772 | STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj; | |
773 | ||
774 | struct device_context *device_ctx = device_to_device_context(device); | |
3d3b5518 | 775 | struct hv_device *device_obj = &device_ctx->device_obj; |
b57a68dc | 776 | struct block_device_context *blkdev = dev_get_drvdata(device); |
f82bd046 HJ |
777 | unsigned long flags; |
778 | ||
779 | DPRINT_ENTER(BLKVSC_DRV); | |
780 | ||
781 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n"); | |
782 | ||
783 | if (!storvsc_drv_obj->Base.OnDeviceRemove) | |
784 | { | |
785 | DPRINT_EXIT(BLKVSC_DRV); | |
786 | return -1; | |
787 | } | |
788 | ||
454f18a9 | 789 | /* Call to the vsc driver to let it know that the device is being removed */ |
f82bd046 HJ |
790 | ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj); |
791 | if (ret != 0) | |
792 | { | |
454f18a9 | 793 | /* TODO: */ |
f82bd046 HJ |
794 | DPRINT_ERR(BLKVSC_DRV, "unable to remove blkvsc device (ret %d)", ret); |
795 | } | |
796 | ||
454f18a9 | 797 | /* Get to a known state */ |
f82bd046 HJ |
798 | spin_lock_irqsave(&blkdev->lock, flags); |
799 | ||
800 | blkdev->shutting_down = 1; | |
801 | ||
802 | blk_stop_queue(blkdev->gd->queue); | |
803 | ||
804 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
805 | ||
806 | while (blkdev->num_outstanding_reqs) | |
807 | { | |
808 | DPRINT_INFO(STORVSC, "waiting for %d requests to complete...", blkdev->num_outstanding_reqs); | |
809 | ||
810 | udelay(100); | |
811 | } | |
812 | ||
813 | blkvsc_do_flush(blkdev); | |
814 | ||
815 | spin_lock_irqsave(&blkdev->lock, flags); | |
816 | ||
817 | blkvsc_cancel_pending_reqs(blkdev); | |
818 | ||
819 | spin_unlock_irqrestore(&blkdev->lock, flags); | |
820 | ||
821 | blk_cleanup_queue(blkdev->gd->queue); | |
822 | ||
823 | del_gendisk(blkdev->gd); | |
824 | ||
825 | kmem_cache_destroy(blkdev->request_pool); | |
826 | ||
827 | kfree(blkdev); | |
828 | ||
829 | DPRINT_EXIT(BLKVSC_DRV); | |
830 | ||
831 | return ret; | |
832 | } | |
833 | ||
834 | static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req) | |
835 | { | |
836 | ASSERT(blkvsc_req->req); | |
837 | ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT*8)); | |
838 | ||
839 | blkvsc_req->cmd_len = 16; | |
840 | ||
841 | if (blkvsc_req->sector_start > 0xffffffff) | |
842 | { | |
843 | if (rq_data_dir(blkvsc_req->req)) | |
844 | { | |
845 | blkvsc_req->write = 1; | |
846 | blkvsc_req->cmnd[0] = WRITE_16; | |
847 | } | |
848 | else | |
849 | { | |
850 | blkvsc_req->write = 0; | |
851 | blkvsc_req->cmnd[0] = READ_16; | |
852 | } | |
853 | ||
854 | blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; | |
855 | ||
856 | *(unsigned long long*)&blkvsc_req->cmnd[2] = cpu_to_be64(blkvsc_req->sector_start); | |
857 | *(unsigned int*)&blkvsc_req->cmnd[10] = cpu_to_be32(blkvsc_req->sector_count); | |
858 | } | |
859 | else if ((blkvsc_req->sector_count > 0xff) || (blkvsc_req->sector_start > 0x1fffff)) | |
860 | { | |
861 | if (rq_data_dir(blkvsc_req->req)) | |
862 | { | |
863 | blkvsc_req->write = 1; | |
864 | blkvsc_req->cmnd[0] = WRITE_10; | |
865 | } | |
866 | else | |
867 | { | |
868 | blkvsc_req->write = 0; | |
869 | blkvsc_req->cmnd[0] = READ_10; | |
870 | } | |
871 | ||
872 | blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0; | |
873 | ||
874 | *(unsigned int *)&blkvsc_req->cmnd[2] = cpu_to_be32(blkvsc_req->sector_start); | |
875 | *(unsigned short*)&blkvsc_req->cmnd[7] = cpu_to_be16(blkvsc_req->sector_count); | |
876 | } | |
877 | else | |
878 | { | |
879 | if (rq_data_dir(blkvsc_req->req)) | |
880 | { | |
881 | blkvsc_req->write = 1; | |
882 | blkvsc_req->cmnd[0] = WRITE_6; | |
883 | } | |
884 | else | |
885 | { | |
886 | blkvsc_req->write = 0; | |
887 | blkvsc_req->cmnd[0] = READ_6; | |
888 | } | |
889 | ||
890 | *(unsigned int *)&blkvsc_req->cmnd[1] = cpu_to_be32(blkvsc_req->sector_start) >> 8; | |
891 | blkvsc_req->cmnd[1] &= 0x1f; | |
892 | blkvsc_req->cmnd[4] = (unsigned char) blkvsc_req->sector_count; | |
893 | } | |
894 | } | |
895 | ||
896 | static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req, void (*request_completion)(STORVSC_REQUEST*) ) | |
897 | { | |
898 | struct block_device_context *blkdev = blkvsc_req->dev; | |
899 | struct device_context *device_ctx=blkdev->device_ctx; | |
900 | struct driver_context *driver_ctx = driver_to_driver_context(device_ctx->device.driver); | |
901 | struct blkvsc_driver_context *blkvsc_drv_ctx = (struct blkvsc_driver_context*)driver_ctx; | |
902 | STORVSC_DRIVER_OBJECT* storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj; | |
903 | int ret =0; | |
904 | ||
905 | STORVSC_REQUEST *storvsc_req; | |
906 | ||
627c156d | 907 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %lu count %ld offset %d len %d\n", |
f82bd046 HJ |
908 | blkvsc_req, |
909 | (blkvsc_req->write)?"WRITE":"READ", | |
627c156d | 910 | (unsigned long) blkvsc_req->sector_start, |
f82bd046 HJ |
911 | blkvsc_req->sector_count, |
912 | blkvsc_req->request.DataBuffer.Offset, | |
913 | blkvsc_req->request.DataBuffer.Length); | |
914 | ||
915 | /*for (i=0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) | |
916 | { | |
917 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n", | |
918 | blkvsc_req, | |
919 | i, | |
920 | blkvsc_req->request.DataBuffer.PfnArray[i]); | |
921 | }*/ | |
922 | ||
923 | storvsc_req = &blkvsc_req->request; | |
924 | storvsc_req->Extension = (void*)((unsigned long)blkvsc_req + sizeof(struct blkvsc_request)); | |
925 | ||
926 | storvsc_req->Type = blkvsc_req->write? WRITE_TYPE : READ_TYPE; | |
927 | ||
928 | storvsc_req->OnIOCompletion = request_completion; | |
929 | storvsc_req->Context = blkvsc_req; | |
930 | ||
931 | storvsc_req->Host = blkdev->port; | |
932 | storvsc_req->Bus = blkdev->path; | |
933 | storvsc_req->TargetId = blkdev->target; | |
454f18a9 | 934 | storvsc_req->LunId = 0; /* this is not really used at all */ |
f82bd046 HJ |
935 | |
936 | storvsc_req->CdbLen = blkvsc_req->cmd_len; | |
937 | storvsc_req->Cdb = blkvsc_req->cmnd; | |
938 | ||
939 | storvsc_req->SenseBuffer = blkvsc_req->sense_buffer; | |
940 | storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE; | |
941 | ||
942 | ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj, &blkvsc_req->request); | |
943 | if (ret == 0) | |
944 | { | |
945 | blkdev->num_outstanding_reqs++; | |
946 | } | |
947 | ||
948 | return ret; | |
949 | } | |
950 | ||
454f18a9 BP |
951 | |
952 | /* | |
953 | * We break the request into 1 or more blkvsc_requests and submit | |
954 | * them. If we cant submit them all, we put them on the | |
955 | * pending_list. The blkvsc_request() will work on the pending_list. | |
956 | */ | |
957 | ||
f82bd046 HJ |
/*
 * Split one block-layer request into 1..n blkvsc_requests (one per
 * contiguous run of whole pages, capped at MAX_MULTIPAGE_BUFFER_COUNT
 * pages) and submit them.  All chunks are tied together through a
 * blkvsc_request_group so that completion can be reported in order.
 *
 * Returns 0 if every chunk was submitted, 1 ("pending") if some chunks
 * had to be parked on blkdev->pending_list, or -ENOMEM.
 */
static int blkvsc_do_request(struct block_device_context *blkdev, struct request *req)
{
	struct bio *bio=NULL;
	struct bio_vec *bvec=NULL;
	struct bio_vec *prev_bvec=NULL;

	struct blkvsc_request *blkvsc_req=NULL;
	struct blkvsc_request *tmp;
	int databuf_idx=0;
	int seg_idx=0;

	sector_t start_sector;
	unsigned long num_sectors = 0;	/* accumulated in 512-byte units */
	int ret=0;
	int pending=0;
	struct blkvsc_request_group *group=NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu \n", blkdev, req, (unsigned long) blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = (struct blkvsc_request_group*)kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
	{
		return -ENOMEM;
	}

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	if (req->bio)
	for (bio = req->bio; bio; bio = bio->bi_next)
	{
		/* Map this bio into an existing or new storvsc request */
		bio_for_each_segment (bvec, bio, seg_idx)
		{
			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
					req, bio, bvec, seg_idx, databuf_idx);

			/*
			 * Get a new storvsc request when the current one
			 * cannot absorb this segment: first iteration, PFN
			 * table full, or the segment would leave a hole in
			 * the otherwise page-contiguous transfer buffer.
			 */
			if ( (!blkvsc_req) ||					/* 1st-time */
				(databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
				(bvec->bv_offset != 0) ||			/* hole at the begin of page */
				(prev_bvec && (prev_bvec->bv_len != PAGE_SIZE)) )	/* hold at the end of page */
			{
				/* submit the prev one */
				if (blkvsc_req)
				{
					/* Convert the 512-byte counters into device sectors. */
					blkvsc_req->sector_start = start_sector;
					sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

					blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

					blkvsc_init_rw(blkvsc_req);
				}

				/* Create new blkvsc_req to represent the current bvec */
				blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req)
				{
					/* free up everything allocated for this group so far */
					list_for_each_entry_safe(blkvsc_req, tmp, &group->blkvsc_req_list, req_entry)
					{
						list_del(&blkvsc_req->req_entry);
						kmem_cache_free(blkdev->request_pool, blkvsc_req);
					}

					kmem_cache_free(blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0, sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.DataBuffer.Offset = bvec->bv_offset;
				blkvsc_req->request.DataBuffer.Length = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry, &blkvsc_req->group->blkvsc_req_list);

				/* New chunk starts where the previous one ended. */
				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/* Add the curr bvec/segment to the curr blkvsc_req */
			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] = page_to_pfn(bvec->bv_page);
			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* rq_for_each_bio */

	/* Handle the last one (the loop above only flushes on overflow) */
	if (blkvsc_req)
	{
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n", blkdev, req, blkvsc_req->group, blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start, (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors / (blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	/* Submit each chunk; once one submission fails, park the rest. */
	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry)
	{
		if (pending)
		{
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %lu sect_count %ld (%lu %ld)\n",
				blkvsc_req, blkvsc_req->sector_start, blkvsc_req->sector_count, (unsigned long) start_sector, (unsigned long) num_sectors);

			list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
		}
		else
		{
			ret = blkvsc_submit_request(blkvsc_req, blkvsc_request_completion);
			if (ret == -1)
			{
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry, &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %lu sect_count %ld (%lu %ld) ret %d\n",
				blkvsc_req, (unsigned long) blkvsc_req->sector_start, blkvsc_req->sector_count, (unsigned long) start_sector, num_sectors, ret);
		}
	}

	return pending;
}
1099 | ||
1100 | static void blkvsc_cmd_completion(STORVSC_REQUEST* request) | |
1101 | { | |
1102 | struct blkvsc_request *blkvsc_req=(struct blkvsc_request*)request->Context; | |
1103 | struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev; | |
1104 | ||
1105 | struct scsi_sense_hdr sense_hdr; | |
1106 | ||
1107 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n", blkvsc_req); | |
1108 | ||
1109 | blkdev->num_outstanding_reqs--; | |
1110 | ||
1111 | if (blkvsc_req->request.Status) | |
1112 | { | |
1113 | if (scsi_normalize_sense(blkvsc_req->sense_buffer, SCSI_SENSE_BUFFERSIZE, &sense_hdr)) | |
1114 | { | |
1115 | scsi_print_sense_hdr("blkvsc", &sense_hdr); | |
1116 | } | |
1117 | } | |
1118 | ||
1119 | blkvsc_req->cond =1; | |
1120 | wake_up_interruptible(&blkvsc_req->wevent); | |
1121 | } | |
1122 | ||
/*
 * Completion callback for requests that came from the block layer.
 * Invoked once per blkvsc_request (chunk); only when the whole group
 * (every chunk of the original struct request) has completed do we
 * report completion upward and restart the queue.
 */
static void blkvsc_request_completion(STORVSC_REQUEST* request)
{
	struct blkvsc_request *blkvsc_req=(struct blkvsc_request*)request->Context;
	struct block_device_context *blkdev = (struct block_device_context*)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %lu sect_count %ld len %d group outstd %d total outstd %d\n",
		blkdev,
		blkvsc_req,
		blkvsc_req->group,
		(blkvsc_req->write)?"WRITE":"READ",
		(unsigned long) blkvsc_req->sector_start,
		blkvsc_req->sector_count,
		blkvsc_req->request.DataBuffer.Length,
		blkvsc_req->group->outstanding,
		blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0)
	{
		list_for_each_entry_safe(comp_req, tmp, &blkvsc_req->group->blkvsc_req_list, req_entry)
		{
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %lu sect_count %ld \n",
				comp_req,
				(unsigned long) comp_req->sector_start,
				comp_req->sector_count);

			list_del(&comp_req->req_entry);

			/*
			 * __blk_end_request() returns 0 once the whole
			 * struct request has been finished; the group is
			 * only freed at that point (last chunk).
			 */
			if (!__blk_end_request(
				comp_req->req,
				(!comp_req->request.Status ? 0: -EIO),
				comp_req->sector_count * blkdev->sector_size))
			{
				/* All the sectors have been xferred ie the request is done */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n", comp_req->req);
				kmem_cache_free(blkdev->request_pool, comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		/* Kick anything that was deferred while we were busy. */
		if (!blkdev->shutting_down)
		{
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
1187 | ||
1188 | static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev) | |
1189 | { | |
1190 | struct blkvsc_request *pend_req, *tmp; | |
1191 | struct blkvsc_request *comp_req, *tmp2; | |
1192 | ||
1193 | int ret=0; | |
1194 | ||
1195 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()"); | |
1196 | ||
454f18a9 | 1197 | /* Flush the pending list first */ |
f82bd046 HJ |
1198 | list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry) |
1199 | { | |
454f18a9 BP |
1200 | /* |
1201 | * The pend_req could be part of a partially completed | |
1202 | * request. If so, complete those req first until we | |
1203 | * hit the pend_req | |
1204 | */ | |
f82bd046 HJ |
1205 | list_for_each_entry_safe(comp_req, tmp2, &pend_req->group->blkvsc_req_list, req_entry) |
1206 | { | |
627c156d | 1207 | DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %lu sect_count %ld \n", |
f82bd046 | 1208 | comp_req, |
627c156d | 1209 | (unsigned long) comp_req->sector_start, |
f82bd046 HJ |
1210 | comp_req->sector_count); |
1211 | ||
1212 | if (comp_req == pend_req) | |
1213 | break; | |
1214 | ||
1215 | list_del(&comp_req->req_entry); | |
1216 | ||
1217 | if (comp_req->req) | |
1218 | { | |
f82bd046 HJ |
1219 | ret = __blk_end_request( |
1220 | comp_req->req, | |
1221 | (!comp_req->request.Status ? 0 : -EIO), | |
1222 | comp_req->sector_count * blkdev->sector_size); | |
f82bd046 HJ |
1223 | ASSERT(ret != 0); |
1224 | } | |
1225 | ||
1226 | kmem_cache_free(blkdev->request_pool, comp_req); | |
1227 | } | |
1228 | ||
1229 | DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n", pend_req); | |
1230 | ||
1231 | list_del(&pend_req->pend_entry); | |
1232 | ||
1233 | list_del(&pend_req->req_entry); | |
1234 | ||
1235 | if (comp_req->req) | |
1236 | { | |
f82bd046 HJ |
1237 | if (!__blk_end_request( |
1238 | pend_req->req, | |
1239 | -EIO, | |
1240 | pend_req->sector_count * blkdev->sector_size)) | |
1241 | { | |
454f18a9 | 1242 | /* All the sectors have been xferred ie the request is done */ |
f82bd046 HJ |
1243 | DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n", pend_req->req); |
1244 | kmem_cache_free(blkdev->request_pool, pend_req->group); | |
1245 | } | |
f82bd046 HJ |
1246 | } |
1247 | ||
1248 | kmem_cache_free(blkdev->request_pool, pend_req); | |
1249 | } | |
1250 | ||
1251 | return ret; | |
1252 | } | |
1253 | ||
1254 | static int blkvsc_do_pending_reqs(struct block_device_context *blkdev) | |
1255 | { | |
1256 | struct blkvsc_request *pend_req, *tmp; | |
1257 | int ret=0; | |
1258 | ||
454f18a9 | 1259 | /* Flush the pending list first */ |
f82bd046 HJ |
1260 | list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list, pend_entry) |
1261 | { | |
1262 | DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n", pend_req); | |
1263 | ||
1264 | ret = blkvsc_submit_request(pend_req, blkvsc_request_completion); | |
1265 | if (ret != 0) | |
1266 | { | |
1267 | break; | |
1268 | } | |
1269 | else | |
1270 | { | |
1271 | list_del(&pend_req->pend_entry); | |
1272 | } | |
1273 | } | |
1274 | ||
1275 | return ret; | |
1276 | } | |
1277 | ||
1278 | static void blkvsc_request(struct request_queue *queue) | |
1279 | { | |
1280 | struct block_device_context *blkdev = NULL; | |
1281 | struct request *req; | |
1282 | int ret=0; | |
1283 | ||
1284 | DPRINT_DBG(BLKVSC_DRV, "- enter \n"); | |
0fce4c2f | 1285 | while ((req = blk_peek_request(queue)) != NULL) |
f82bd046 HJ |
1286 | { |
1287 | DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req); | |
1288 | ||
1289 | blkdev = req->rq_disk->private_data; | |
1290 | if (blkdev->shutting_down || !blk_fs_request(req) || blkdev->media_not_present) { | |
0fce4c2f | 1291 | __blk_end_request_cur(req, 0); |
f82bd046 HJ |
1292 | continue; |
1293 | } | |
1294 | ||
1295 | ret = blkvsc_do_pending_reqs(blkdev); | |
1296 | ||
1297 | if (ret != 0) | |
1298 | { | |
1299 | DPRINT_DBG(BLKVSC_DRV, "- stop queue - pending_list not empty\n"); | |
1300 | blk_stop_queue(queue); | |
1301 | break; | |
1302 | } | |
1303 | ||
0fce4c2f | 1304 | blk_start_request(req); |
f82bd046 HJ |
1305 | |
1306 | ret = blkvsc_do_request(blkdev, req); | |
1307 | if (ret > 0) | |
1308 | { | |
1309 | DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n"); | |
1310 | blk_stop_queue(queue); | |
1311 | break; | |
1312 | } | |
1313 | else if (ret < 0) | |
1314 | { | |
1315 | DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n"); | |
1316 | blk_requeue_request(queue, req); | |
1317 | blk_stop_queue(queue); | |
1318 | break; | |
1319 | } | |
1320 | } | |
1321 | } | |
1322 | ||
39635f7d | 1323 | static int blkvsc_open(struct block_device *bdev, fmode_t mode) |
f82bd046 | 1324 | { |
39635f7d | 1325 | struct block_device_context *blkdev = bdev->bd_disk->private_data; |
f82bd046 HJ |
1326 | |
1327 | DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name); | |
1328 | ||
1329 | spin_lock(&blkdev->lock); | |
1330 | ||
1331 | if (!blkdev->users && blkdev->device_type == DVD_TYPE) | |
1332 | { | |
1333 | spin_unlock(&blkdev->lock); | |
39635f7d | 1334 | check_disk_change(bdev); |
f82bd046 HJ |
1335 | spin_lock(&blkdev->lock); |
1336 | } | |
1337 | ||
1338 | blkdev->users++; | |
1339 | ||
1340 | spin_unlock(&blkdev->lock); | |
1341 | return 0; | |
1342 | } | |
1343 | ||
77d2d9da | 1344 | static int blkvsc_release(struct gendisk *disk, fmode_t mode) |
f82bd046 | 1345 | { |
77d2d9da | 1346 | struct block_device_context *blkdev = disk->private_data; |
f82bd046 HJ |
1347 | |
1348 | DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users, blkdev->gd->disk_name); | |
1349 | ||
1350 | spin_lock(&blkdev->lock); | |
1351 | if (blkdev->users == 1) | |
1352 | { | |
1353 | spin_unlock(&blkdev->lock); | |
1354 | blkvsc_do_flush(blkdev); | |
1355 | spin_lock(&blkdev->lock); | |
1356 | } | |
1357 | ||
1358 | blkdev->users--; | |
1359 | ||
1360 | spin_unlock(&blkdev->lock); | |
1361 | return 0; | |
1362 | } | |
1363 | ||
1364 | static int blkvsc_media_changed(struct gendisk *gd) | |
1365 | { | |
1366 | DPRINT_DBG(BLKVSC_DRV, "- enter\n"); | |
1367 | ||
1368 | return 1; | |
1369 | } | |
1370 | ||
1371 | static int blkvsc_revalidate_disk(struct gendisk *gd) | |
1372 | { | |
1373 | struct block_device_context *blkdev = gd->private_data; | |
1374 | ||
1375 | DPRINT_DBG(BLKVSC_DRV, "- enter\n"); | |
1376 | ||
1377 | if (blkdev->device_type == DVD_TYPE) | |
1378 | { | |
1379 | blkvsc_do_read_capacity(blkdev); | |
1380 | set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512)); | |
0fce4c2f | 1381 | blk_queue_logical_block_size(gd->queue, blkdev->sector_size); |
f82bd046 HJ |
1382 | } |
1383 | return 0; | |
1384 | } | |
1385 | ||
1386 | int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg) | |
1387 | { | |
1388 | sector_t total_sectors = get_capacity(bd->bd_disk); | |
1389 | sector_t cylinder_times_heads=0; | |
1390 | sector_t temp=0; | |
1391 | ||
1392 | int sectors_per_track=0; | |
1393 | int heads=0; | |
1394 | int cylinders=0; | |
1395 | int rem=0; | |
1396 | ||
1397 | if (total_sectors > (65535 * 16 * 255)) { | |
454f18a9 | 1398 | total_sectors = (65535 * 16 * 255); |
f82bd046 HJ |
1399 | } |
1400 | ||
1401 | if (total_sectors >= (65535 * 16 * 63)) { | |
454f18a9 BP |
1402 | sectors_per_track = 255; |
1403 | heads = 16; | |
f82bd046 HJ |
1404 | |
1405 | cylinder_times_heads = total_sectors; | |
454f18a9 | 1406 | rem = sector_div(cylinder_times_heads, sectors_per_track); /* sector_div stores the quotient in cylinder_times_heads */ |
f82bd046 HJ |
1407 | } |
1408 | else | |
1409 | { | |
454f18a9 | 1410 | sectors_per_track = 17; |
f82bd046 HJ |
1411 | |
1412 | cylinder_times_heads = total_sectors; | |
454f18a9 | 1413 | rem = sector_div(cylinder_times_heads, sectors_per_track); /* sector_div stores the quotient in cylinder_times_heads */ |
f82bd046 HJ |
1414 | |
1415 | temp = cylinder_times_heads + 1023; | |
454f18a9 | 1416 | rem = sector_div(temp, 1024); /* sector_div stores the quotient in temp */ |
f82bd046 HJ |
1417 | |
1418 | heads = temp; | |
1419 | ||
454f18a9 BP |
1420 | if (heads < 4) { |
1421 | heads = 4; | |
1422 | } | |
f82bd046 | 1423 | |
454f18a9 BP |
1424 | if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) { |
1425 | sectors_per_track = 31; | |
1426 | heads = 16; | |
f82bd046 HJ |
1427 | |
1428 | cylinder_times_heads = total_sectors; | |
454f18a9 BP |
1429 | rem = sector_div(cylinder_times_heads, sectors_per_track); /* sector_div stores the quotient in cylinder_times_heads */ |
1430 | } | |
f82bd046 | 1431 | |
454f18a9 BP |
1432 | if (cylinder_times_heads >= (heads * 1024)) { |
1433 | sectors_per_track = 63; | |
1434 | heads = 16; | |
f82bd046 HJ |
1435 | |
1436 | cylinder_times_heads = total_sectors; | |
454f18a9 BP |
1437 | rem = sector_div(cylinder_times_heads, sectors_per_track); /* sector_div stores the quotient in cylinder_times_heads */ |
1438 | } | |
f82bd046 HJ |
1439 | } |
1440 | ||
1441 | temp = cylinder_times_heads; | |
454f18a9 | 1442 | rem = sector_div(temp, heads); /* sector_div stores the quotient in temp */ |
f82bd046 HJ |
1443 | cylinders = temp; |
1444 | ||
1445 | hg->heads = heads; | |
1446 | hg->sectors = sectors_per_track; | |
1447 | hg->cylinders = cylinders; | |
1448 | ||
1449 | DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads, sectors_per_track); | |
1450 | ||
1451 | return 0; | |
1452 | } | |
1453 | ||
dfe8b2d9 BP |
1454 | static int blkvsc_ioctl(struct block_device *bd, fmode_t mode, |
1455 | unsigned cmd, unsigned long argument) | |
f82bd046 | 1456 | { |
f82bd046 HJ |
1457 | struct block_device_context *blkdev = bd->bd_disk->private_data; |
1458 | int ret=0; | |
1459 | ||
1460 | switch (cmd) | |
1461 | { | |
454f18a9 BP |
1462 | /* TODO: I think there is certain format for HDIO_GET_IDENTITY rather than just */ |
1463 | /* a GUID. Commented it out for now. */ | |
f82bd046 HJ |
1464 | /*case HDIO_GET_IDENTITY: |
1465 | DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n"); | |
1466 | ||
1467 | if (copy_to_user((void __user *)arg, blkdev->device_id, blkdev->device_id_len)) | |
1468 | { | |
1469 | ret = -EFAULT; | |
1470 | } | |
1471 | ||
1472 | break;*/ | |
1473 | default: | |
1474 | ret = -EINVAL; | |
1475 | break; | |
1476 | } | |
1477 | ||
1478 | return ret; | |
1479 | } | |
1480 | ||
1481 | ||
1482 | MODULE_LICENSE("GPL"); | |
1483 | ||
1484 | static int __init blkvsc_init(void) | |
1485 | { | |
1486 | int ret; | |
1487 | ||
454f18a9 | 1488 | ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */ |
f82bd046 HJ |
1489 | |
1490 | DPRINT_ENTER(BLKVSC_DRV); | |
1491 | ||
1492 | DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing...."); | |
1493 | ||
1494 | ret = blkvsc_drv_init(BlkVscInitialize); | |
1495 | ||
1496 | DPRINT_EXIT(BLKVSC_DRV); | |
1497 | ||
1498 | return ret; | |
1499 | } | |
1500 | ||
1501 | static void __exit blkvsc_exit(void) | |
1502 | { | |
1503 | DPRINT_ENTER(BLKVSC_DRV); | |
1504 | ||
1505 | blkvsc_drv_exit(); | |
1506 | ||
1507 | DPRINT_ENTER(BLKVSC_DRV); | |
1508 | } | |
1509 | ||
1510 | module_param(blkvsc_ringbuffer_size, int, S_IRUGO); | |
1511 | ||
1512 | module_init(blkvsc_init); | |
1513 | module_exit(blkvsc_exit); | |
1514 | ||
454f18a9 | 1515 | /* eof */ |