1 /*
2  * Copyright (c) 2009, Microsoft Corporation.
3  *
4  * This program is free software; you can redistribute it and/or modify it
5  * under the terms and conditions of the GNU General Public License,
6  * version 2, as published by the Free Software Foundation.
7  *
8  * This program is distributed in the hope it will be useful, but WITHOUT
9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
11  * more details.
12  *
13  * You should have received a copy of the GNU General Public License along with
14  * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
15  * Place - Suite 330, Boston, MA 02111-1307 USA.
16  *
17  * Authors:
18  *   Haiyang Zhang <haiyangz@microsoft.com>
19  *   Hank Janssen  <hjanssen@microsoft.com>
20  */
21 #include <linux/init.h>
22 #include <linux/module.h>
23 #include <linux/device.h>
24 #include <linux/blkdev.h>
25 #include <scsi/scsi.h>
26 #include <scsi/scsi_cmnd.h>
27 #include <scsi/scsi_host.h>
28 #include <scsi/scsi_device.h>
29 #include <scsi/scsi_tcq.h>
30 #include <scsi/scsi_eh.h>
31 #include <scsi/scsi_devinfo.h>
32 #include <scsi/scsi_dbg.h>
33 #include "osd.h"
34 #include "logging.h"
35 #include "VersionInfo.h"
36 #include "vmbus.h"
37 #include "StorVscApi.h"
38
39
40 struct host_device_context {
41         /* must be 1st field
42          * FIXME this is a bug */
43         /* point back to our device context */
44         struct vm_device *device_ctx;
45         struct kmem_cache *request_pool;
46         unsigned int port;
47         unsigned char path;
48         unsigned char target;
49 };
50
51 struct storvsc_cmd_request {
52         struct list_head entry;
53         struct scsi_cmnd *cmd;
54
55         unsigned int bounce_sgl_count;
56         struct scatterlist *bounce_sgl;
57
58         struct hv_storvsc_request request;
59         /* !!!DO NOT ADD ANYTHING BELOW HERE!!! */
60         /* The extension buffer falls right here and is pointed to by
61          * request.Extension, which sounds like a very bad
62          * design... */
63 };
64
65 struct storvsc_driver_context {
66         /* !! These must be the first 2 fields !! */
67         /* FIXME this is a bug... */
68         struct driver_context drv_ctx;
69         struct storvsc_driver_object drv_obj;
70 };
71
72 /* Static decl */
73 static int storvsc_probe(struct device *dev);
74 static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
75                                 void (*done)(struct scsi_cmnd *));
76 static int storvsc_device_alloc(struct scsi_device *);
77 static int storvsc_device_configure(struct scsi_device *);
78 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd);
79 static int storvsc_remove(struct device *dev);
80
81 static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
82                                                 unsigned int sg_count,
83                                                 unsigned int len);
84 static void destroy_bounce_buffer(struct scatterlist *sgl,
85                                   unsigned int sg_count);
86 static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count);
87 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
88                                             struct scatterlist *bounce_sgl,
89                                             unsigned int orig_sgl_count);
90 static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
91                                           struct scatterlist *bounce_sgl,
92                                           unsigned int orig_sgl_count);
93
94 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
95                            sector_t capacity, int *info);
96
97
98 static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
99
100 /* The one and only one */
101 static struct storvsc_driver_context g_storvsc_drv;
102
103 /* Scsi driver */
104 static struct scsi_host_template scsi_driver = {
105         .module =               THIS_MODULE,
106         .name =                 "storvsc_host_t",
107         .bios_param =           storvsc_get_chs,
108         .queuecommand =         storvsc_queuecommand,
109         .eh_host_reset_handler =        storvsc_host_reset_handler,
110         .slave_alloc =          storvsc_device_alloc,
111         .slave_configure =      storvsc_device_configure,
112         .cmd_per_lun =          1,
113         /* 64 max_queue * 1 target */
114         .can_queue =            STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
115         .this_id =              -1,
116         /* no use setting to 0 since ll_blk_rw resets it to 1 */
117         /* currently 32 */
118         .sg_tablesize =         MAX_MULTIPAGE_BUFFER_COUNT,
119         /*
120          * ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs
121          * to merge into one sg element. If set, we must limit
122          * max_segment_size to PAGE_SIZE; otherwise we may get one sg
123          * element that represents multiple physically contiguous pfns
124          * (i.e. sg[x].length > PAGE_SIZE).
125          */
126         .use_clustering =       ENABLE_CLUSTERING,
127         /* Make sure we don't get an sg segment that crosses a page boundary */
128         .dma_boundary =         PAGE_SIZE-1,
129 };
130
131
132 /**
133  * storvsc_drv_init - StorVsc driver initialization.
134  */
135 static int storvsc_drv_init(int (*drv_init)(struct hv_driver *drv))
136 {
137         int ret;
138         struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
139         struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;
140
141         DPRINT_ENTER(STORVSC_DRV);
142
143         vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);
144
145         storvsc_drv_obj->RingBufferSize = storvsc_ringbuffer_size;
146
147         /* Callback to client driver to complete the initialization */
148         drv_init(&storvsc_drv_obj->Base);
149
150         DPRINT_INFO(STORVSC_DRV,
151                     "request extension size %u, max outstanding reqs %u",
152                     storvsc_drv_obj->RequestExtSize,
153                     storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
154
155         if (storvsc_drv_obj->MaxOutstandingRequestsPerChannel <
156             STORVSC_MAX_IO_REQUESTS) {
157                 DPRINT_ERR(STORVSC_DRV,
158                            "The number of outstanding io requests (%d) "
159                            "is larger than that supported (%d) internally.",
160                            STORVSC_MAX_IO_REQUESTS,
161                            storvsc_drv_obj->MaxOutstandingRequestsPerChannel);
162                 return -1;
163         }
164
165         drv_ctx->driver.name = storvsc_drv_obj->Base.name;
166         memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
167                sizeof(struct hv_guid));
168
169         drv_ctx->probe = storvsc_probe;
170         drv_ctx->remove = storvsc_remove;
171
172         /* The driver belongs to vmbus */
173         ret = vmbus_child_driver_register(drv_ctx);
174
175         DPRINT_EXIT(STORVSC_DRV);
176
177         return ret;
178 }
179
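/*
 * storvsc_drv_exit_cb() simply records the first child device handed to it
 * and stops the iteration; storvsc_drv_exit() below uses it to peel the
 * driver's devices off one at a time before unregistering the driver itself.
 */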
180 static int storvsc_drv_exit_cb(struct device *dev, void *data)
181 {
182         struct device **curr = (struct device **)data;
183         *curr = dev;
184         return 1; /* stop iterating */
185 }
186
187 static void storvsc_drv_exit(void)
188 {
189         struct storvsc_driver_object *storvsc_drv_obj = &g_storvsc_drv.drv_obj;
190         struct driver_context *drv_ctx = &g_storvsc_drv.drv_ctx;
191         struct device *current_dev = NULL;
192         int ret;
193
194         DPRINT_ENTER(STORVSC_DRV);
195
196         while (1) {
197                 current_dev = NULL;
198
199                 /* Get the device */
200                 ret = driver_for_each_device(&drv_ctx->driver, NULL,
201                                              (void *) &current_dev,
202                                              storvsc_drv_exit_cb);
203
204                 if (ret)
205                         DPRINT_WARN(STORVSC_DRV,
206                                     "driver_for_each_device returned %d", ret);
207
208                 if (current_dev == NULL)
209                         break;
210
211                 /* Initiate removal from the top-down */
212                 device_unregister(current_dev);
213         }
214
215         if (storvsc_drv_obj->Base.OnCleanup)
216                 storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);
217
218         vmbus_child_driver_unregister(drv_ctx);
219
220         DPRINT_EXIT(STORVSC_DRV);
221
222         return;
223 }
224
225 /**
226  * storvsc_probe - Add a new device for this driver
227  */
228 static int storvsc_probe(struct device *device)
229 {
230         int ret;
231         struct driver_context *driver_ctx =
232                                 driver_to_driver_context(device->driver);
233         struct storvsc_driver_context *storvsc_drv_ctx =
234                                 (struct storvsc_driver_context *)driver_ctx;
235         struct storvsc_driver_object *storvsc_drv_obj =
236                                 &storvsc_drv_ctx->drv_obj;
237         struct vm_device *device_ctx = device_to_vm_device(device);
238         struct hv_device *device_obj = &device_ctx->device_obj;
239         struct Scsi_Host *host;
240         struct host_device_context *host_device_ctx;
241         struct storvsc_device_info device_info;
242
243         DPRINT_ENTER(STORVSC_DRV);
244
245         if (!storvsc_drv_obj->Base.OnDeviceAdd)
246                 return -1;
247
248         host = scsi_host_alloc(&scsi_driver,
249                                sizeof(struct host_device_context));
250         if (!host) {
251                 DPRINT_ERR(STORVSC_DRV, "unable to allocate scsi host object");
252                 return -ENOMEM;
253         }
254
255         dev_set_drvdata(device, host);
256
257         host_device_ctx = (struct host_device_context *)host->hostdata;
258         memset(host_device_ctx, 0, sizeof(struct host_device_context));
259
260         host_device_ctx->port = host->host_no;
261         host_device_ctx->device_ctx = device_ctx;
262
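        /*
         * Size each pool object as a storvsc_cmd_request followed by the
         * VSC's request extension, so that request.Extension can simply
         * point just past the struct (see the layout note in
         * struct storvsc_cmd_request above).
         */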
263         host_device_ctx->request_pool =
264                                 kmem_cache_create(dev_name(&device_ctx->device),
265                                         sizeof(struct storvsc_cmd_request) +
266                                         storvsc_drv_obj->RequestExtSize, 0,
267                                         SLAB_HWCACHE_ALIGN, NULL);
268
269         if (!host_device_ctx->request_pool) {
270                 scsi_host_put(host);
271                 DPRINT_EXIT(STORVSC_DRV);
272
273                 return -ENOMEM;
274         }
275
276         device_info.PortNumber = host->host_no;
277         /* Call to the vsc driver to add the device */
278         ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj,
279                                                 (void *)&device_info);
280         if (ret != 0) {
281                 DPRINT_ERR(STORVSC_DRV, "unable to add scsi vsc device");
282                 kmem_cache_destroy(host_device_ctx->request_pool);
283                 scsi_host_put(host);
284                 DPRINT_EXIT(STORVSC_DRV);
285
286                 return -1;
287         }
288
289         /* host_device_ctx->port = device_info.PortNumber; */
290         host_device_ctx->path = device_info.PathId;
291         host_device_ctx->target = device_info.TargetId;
292
293         /* max # of devices per target */
294         host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
295         /* max # of targets per channel */
296         host->max_id = STORVSC_MAX_TARGETS;
297         /* max # of channels */
298         host->max_channel = STORVSC_MAX_CHANNELS - 1;
299
300         /* Register the HBA and start the scsi bus scan */
301         ret = scsi_add_host(host, device);
302         if (ret != 0) {
303                 DPRINT_ERR(STORVSC_DRV, "unable to add scsi host device");
304
305                 storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
306
307                 kmem_cache_destroy(host_device_ctx->request_pool);
308                 scsi_host_put(host);
309                 DPRINT_EXIT(STORVSC_DRV);
310
311                 return -1;
312         }
313
314         scsi_scan_host(host);
315
316         DPRINT_EXIT(STORVSC_DRV);
317
318         return ret;
319 }
320
321 /**
322  * storvsc_remove - Callback when our device is removed
323  */
324 static int storvsc_remove(struct device *device)
325 {
326         int ret;
327         struct driver_context *driver_ctx =
328                         driver_to_driver_context(device->driver);
329         struct storvsc_driver_context *storvsc_drv_ctx =
330                         (struct storvsc_driver_context *)driver_ctx;
331         struct storvsc_driver_object *storvsc_drv_obj =
332                         &storvsc_drv_ctx->drv_obj;
333         struct vm_device *device_ctx = device_to_vm_device(device);
334         struct hv_device *device_obj = &device_ctx->device_obj;
335         struct Scsi_Host *host = dev_get_drvdata(device);
336         struct host_device_context *host_device_ctx =
337                         (struct host_device_context *)host->hostdata;
338
339
340         DPRINT_ENTER(STORVSC_DRV);
341
342         if (!storvsc_drv_obj->Base.OnDeviceRemove) {
343                 DPRINT_EXIT(STORVSC_DRV);
344                 return -1;
345         }
346
347         /*
348          * Call to the vsc driver to let it know that the device is being
349          * removed
350          */
351         ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
352         if (ret != 0) {
353                 /* TODO: */
354                 DPRINT_ERR(STORVSC, "unable to remove vsc device (ret %d)",
355                            ret);
356         }
357
358         if (host_device_ctx->request_pool) {
359                 kmem_cache_destroy(host_device_ctx->request_pool);
360                 host_device_ctx->request_pool = NULL;
361         }
362
363         DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
364         scsi_remove_host(host);
365
366         DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
367         scsi_host_put(host);
368
369         DPRINT_EXIT(STORVSC_DRV);
370
371         return ret;
372 }
373
374 /**
375  * storvsc_command_completion - Command completion processing
376  */
377 static void storvsc_command_completion(struct hv_storvsc_request *request)
378 {
379         struct storvsc_cmd_request *cmd_request =
380                 (struct storvsc_cmd_request *)request->Context;
381         struct scsi_cmnd *scmnd = cmd_request->cmd;
382         struct host_device_context *host_device_ctx =
383                 (struct host_device_context *)scmnd->device->host->hostdata;
384         void (*scsi_done_fn)(struct scsi_cmnd *);
385         struct scsi_sense_hdr sense_hdr;
386
387         ASSERT(request == &cmd_request->request);
388         ASSERT((unsigned long)scmnd->host_scribble ==
389                 (unsigned long)cmd_request);
390         ASSERT(scmnd);
391         ASSERT(scmnd->scsi_done);
392
393         DPRINT_ENTER(STORVSC_DRV);
394
395         if (cmd_request->bounce_sgl_count) {
396                 /* using bounce buffer */
397                 /* printk("copy_from_bounce_buffer\n"); */
398
399                 /* FIXME: We can optimize on writes by just skipping this */
400                 copy_from_bounce_buffer(scsi_sglist(scmnd),
401                                         cmd_request->bounce_sgl,
402                                         scsi_sg_count(scmnd));
403                 destroy_bounce_buffer(cmd_request->bounce_sgl,
404                                       cmd_request->bounce_sgl_count);
405         }
406
407         scmnd->result = request->Status;
408
409         if (scmnd->result) {
410                 if (scsi_normalize_sense(scmnd->sense_buffer,
411                                          request->SenseBufferSize, &sense_hdr))
412                         scsi_print_sense_hdr("storvsc", &sense_hdr);
413         }
414
415         ASSERT(request->BytesXfer <= request->DataBuffer.Length);
416         scsi_set_resid(scmnd, request->DataBuffer.Length - request->BytesXfer);
417
418         scsi_done_fn = scmnd->scsi_done;
419
420         scmnd->host_scribble = NULL;
421         scmnd->scsi_done = NULL;
422
423         /* !!DO NOT MODIFY the scmnd after this call */
424         scsi_done_fn(scmnd);
425
426         kmem_cache_free(host_device_ctx->request_pool, cmd_request);
427
428         DPRINT_EXIT(STORVSC_DRV);
429 }
430
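/*
 * do_bounce_buffer() - decide whether a scatterlist needs bouncing.
 *
 * Returns -1 if the list can be handed to the VSC as-is (fewer than two
 * entries, or every entry lines up on page boundaries with no holes);
 * otherwise returns the index of the first entry that breaks the rule.
 */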
431 static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
432 {
433         int i;
434
435         /* A single entry can never leave a hole; no bouncing needed */
436         if (sg_count < 2)
437                 return -1;
438
439         /* We have at least 2 sg entries */
440         for (i = 0; i < sg_count; i++) {
441                 if (i == 0) {
442                         /* make sure 1st one does not have hole */
443                         if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
444                                 return i;
445                 } else if (i == sg_count - 1) {
446                         /* make sure last one does not have hole */
447                         if (sgl[i].offset != 0)
448                                 return i;
449                 } else {
450                         /* make sure no hole in the middle */
451                         if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
452                                 return i;
453                 }
454         }
455         return -1;
456 }
457
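/*
 * Allocate a bounce scatterlist backed by freshly allocated pages, one page
 * for every PAGE_SIZE chunk of len. Each entry's length starts at zero and
 * is grown by copy_to_bounce_buffer() as data is copied in. GFP_ATOMIC is
 * used because this sits on the I/O submission path.
 */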
458 static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
459                                                 unsigned int sg_count,
460                                                 unsigned int len)
461 {
462         int i;
463         int num_pages;
464         struct scatterlist *bounce_sgl;
465         struct page *page_buf;
466
467         num_pages = ALIGN_UP(len, PAGE_SIZE) >> PAGE_SHIFT;
468
469         bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
470         if (!bounce_sgl)
471                 return NULL;
472
473         for (i = 0; i < num_pages; i++) {
474                 page_buf = alloc_page(GFP_ATOMIC);
475                 if (!page_buf)
476                         goto cleanup;
477                 sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
478         }
479
480         return bounce_sgl;
481
482 cleanup:
483         destroy_bounce_buffer(bounce_sgl, num_pages);
484         return NULL;
485 }
486
487 static void destroy_bounce_buffer(struct scatterlist *sgl,
488                                   unsigned int sg_count)
489 {
490         int i;
491         struct page *page_buf;
492
493         for (i = 0; i < sg_count; i++) {
494                 page_buf = sg_page((&sgl[i]));
495                 if (page_buf != NULL)
496                         __free_page(page_buf);
497         }
498
499         kfree(sgl);
500 }
501
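/*
 * Both bounce-copy helpers below run with local interrupts disabled and map
 * pages with KM_IRQ0 atomic kmaps, presumably because the copy-out side can
 * be reached from the I/O completion callback.
 */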
502 /* Assume bounce_sgl has enough room, i.e. it was sized by create_bounce_buffer() */
503 static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
504                                           struct scatterlist *bounce_sgl,
505                                           unsigned int orig_sgl_count)
506 {
507         int i;
508         int j = 0;
509         unsigned long src, dest;
510         unsigned int srclen, destlen, copylen;
511         unsigned int total_copied = 0;
512         unsigned long bounce_addr = 0;
513         unsigned long src_addr = 0;
514         unsigned long flags;
515
516         local_irq_save(flags);
517
518         for (i = 0; i < orig_sgl_count; i++) {
519                 src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
520                                 KM_IRQ0) + orig_sgl[i].offset;
521                 src = src_addr;
522                 srclen = orig_sgl[i].length;
523
524                 ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
525
526                 if (j == 0)
527                         bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
528
529                 while (srclen) {
530                         /* assume bounce offset always == 0 */
531                         dest = bounce_addr + bounce_sgl[j].length;
532                         destlen = PAGE_SIZE - bounce_sgl[j].length;
533
534                         copylen = min(srclen, destlen);
535                         memcpy((void *)dest, (void *)src, copylen);
536
537                         total_copied += copylen;
538                         bounce_sgl[j].length += copylen;
539                         srclen -= copylen;
540                         src += copylen;
541
542                         if (bounce_sgl[j].length == PAGE_SIZE) {
543                                 /* full..move to next entry */
544                                 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
545                                 j++;
546
547                                 /* if we need to use another bounce buffer */
548                                 if (srclen || i != orig_sgl_count - 1)
549                                         bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
550                         } else if (srclen == 0 && i == orig_sgl_count - 1) {
551                                 /* unmap the last bounce that is < PAGE_SIZE */
552                                 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
553                         }
554                 }
555
556                 kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
557         }
558
559         local_irq_restore(flags);
560
561         return total_copied;
562 }
563
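/*
 * Note: copy_to_bounce_buffer() grows bounce_sgl[].length as its write
 * cursor, while copy_from_bounce_buffer() below advances bounce_sgl[].offset
 * as its read cursor, so a bounce list is effectively single-use (both
 * callers destroy it right after the copy-out).
 */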
564 /* Assume the original sgl has enough room */
565 static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
566                                             struct scatterlist *bounce_sgl,
567                                             unsigned int orig_sgl_count)
568 {
569         int i;
570         int j = 0;
571         unsigned long src, dest;
572         unsigned int srclen, destlen, copylen;
573         unsigned int total_copied = 0;
574         unsigned long bounce_addr = 0;
575         unsigned long dest_addr = 0;
576         unsigned long flags;
577
578         local_irq_save(flags);
579
580         for (i = 0; i < orig_sgl_count; i++) {
581                 dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
582                                         KM_IRQ0) + orig_sgl[i].offset;
583                 dest = dest_addr;
584                 destlen = orig_sgl[i].length;
585                 ASSERT(orig_sgl[i].offset + orig_sgl[i].length <= PAGE_SIZE);
586
587                 if (j == 0)
588                         bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
589
590                 while (destlen) {
591                         src = bounce_addr + bounce_sgl[j].offset;
592                         srclen = bounce_sgl[j].length - bounce_sgl[j].offset;
593
594                         copylen = min(srclen, destlen);
595                         memcpy((void *)dest, (void *)src, copylen);
596
597                         total_copied += copylen;
598                         bounce_sgl[j].offset += copylen;
599                         destlen -= copylen;
600                         dest += copylen;
601
602                         if (bounce_sgl[j].offset == bounce_sgl[j].length) {
603                                 /* full */
604                                 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
605                                 j++;
606
607                                 /* if we need to use another bounce buffer */
608                                 if (destlen || i != orig_sgl_count - 1)
609                                         bounce_addr = (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])), KM_IRQ0);
610                         } else if (destlen == 0 && i == orig_sgl_count - 1) {
611                                 /* unmap the last bounce that is < PAGE_SIZE */
612                                 kunmap_atomic((void *)bounce_addr, KM_IRQ0);
613                         }
614                 }
615
616                 kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
617                               KM_IRQ0);
618         }
619
620         local_irq_restore(flags);
621
622         return total_copied;
623 }
624
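/*
 * Submission path: allocate a storvsc_cmd_request (plus the VSC extension)
 * from the per-host pool, bounce the scatterlist if it has page-boundary
 * holes, translate the scsi_cmnd into an hv_storvsc_request and hand it to
 * the VSC layer. A -1 return from OnIORequest means the channel is full, so
 * the request is unwound and the midlayer is asked to retry.
 */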
625 /**
626  * storvsc_queuecommand - Initiate command processing
627  */
628 static int storvsc_queuecommand(struct scsi_cmnd *scmnd,
629                                 void (*done)(struct scsi_cmnd *))
630 {
631         int ret;
632         struct host_device_context *host_device_ctx =
633                 (struct host_device_context *)scmnd->device->host->hostdata;
634         struct vm_device *device_ctx = host_device_ctx->device_ctx;
635         struct driver_context *driver_ctx =
636                 driver_to_driver_context(device_ctx->device.driver);
637         struct storvsc_driver_context *storvsc_drv_ctx =
638                 (struct storvsc_driver_context *)driver_ctx;
639         struct storvsc_driver_object *storvsc_drv_obj =
640                 &storvsc_drv_ctx->drv_obj;
641         struct hv_storvsc_request *request;
642         struct storvsc_cmd_request *cmd_request;
643         unsigned int request_size = 0;
644         int i;
645         struct scatterlist *sgl;
646
647         DPRINT_ENTER(STORVSC_DRV);
648
649         DPRINT_DBG(STORVSC_DRV, "scmnd %p dir %d, use_sg %d buf %p len %d "
650                    "queue depth %d tagged %d", scmnd, scmnd->sc_data_direction,
651                    scsi_sg_count(scmnd), scsi_sglist(scmnd),
652                    scsi_bufflen(scmnd), scmnd->device->queue_depth,
653                    scmnd->device->tagged_supported);
654
655         /* If retrying, no need to prep the cmd */
656         if (scmnd->host_scribble) {
657                 ASSERT(scmnd->scsi_done != NULL);
658
659                 cmd_request =
660                         (struct storvsc_cmd_request *)scmnd->host_scribble;
661                 DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p",
662                             scmnd, cmd_request);
663
664                 goto retry_request;
665         }
666
667         ASSERT(scmnd->scsi_done == NULL);
668         ASSERT(scmnd->host_scribble == NULL);
669
670         scmnd->scsi_done = done;
671
672         request_size = sizeof(struct storvsc_cmd_request);
673
674         cmd_request = kmem_cache_alloc(host_device_ctx->request_pool,
675                                        GFP_ATOMIC);
676         if (!cmd_request) {
677                 DPRINT_ERR(STORVSC_DRV, "scmnd (%p) - unable to allocate "
678                            "storvsc_cmd_request...marking queue busy", scmnd);
679                 scmnd->scsi_done = NULL;
680                 return SCSI_MLQUEUE_DEVICE_BUSY;
681         }
682
683         /* Setup the cmd request */
684         cmd_request->bounce_sgl_count = 0;
685         cmd_request->bounce_sgl = NULL;
686         cmd_request->cmd = scmnd;
687
688         scmnd->host_scribble = (unsigned char *)cmd_request;
689
690         request = &cmd_request->request;
691
692         request->Extension =
693                 (void *)((unsigned long)cmd_request + request_size);
694         DPRINT_DBG(STORVSC_DRV, "req %p size %d ext %d", request, request_size,
695                    storvsc_drv_obj->RequestExtSize);
696
697         /* Build the SRB */
698         switch (scmnd->sc_data_direction) {
699         case DMA_TO_DEVICE:
700                 request->Type = WRITE_TYPE;
701                 break;
702         case DMA_FROM_DEVICE:
703                 request->Type = READ_TYPE;
704                 break;
705         default:
706                 request->Type = UNKNOWN_TYPE;
707                 break;
708         }
709
710         request->OnIOCompletion = storvsc_command_completion;
711         request->Context = cmd_request;/* scmnd; */
712
713         /* request->PortId = scmnd->device->channel; */
714         request->Host = host_device_ctx->port;
715         request->Bus = scmnd->device->channel;
716         request->TargetId = scmnd->device->id;
717         request->LunId = scmnd->device->lun;
718
719         ASSERT(scmnd->cmd_len <= 16);
720         request->CdbLen = scmnd->cmd_len;
721         request->Cdb = scmnd->cmnd;
722
723         request->SenseBuffer = scmnd->sense_buffer;
724         request->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;
725
726
727         request->DataBuffer.Length = scsi_bufflen(scmnd);
728         if (scsi_sg_count(scmnd)) {
729                 sgl = (struct scatterlist *)scsi_sglist(scmnd);
730
731                 /* check if we need to bounce the sgl */
732                 if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
733                         DPRINT_INFO(STORVSC_DRV,
734                                     "need to bounce buffer for this scmnd %p",
735                                     scmnd);
736                         cmd_request->bounce_sgl =
737                                 create_bounce_buffer(sgl, scsi_sg_count(scmnd),
738                                                      scsi_bufflen(scmnd));
739                         if (!cmd_request->bounce_sgl) {
740                                 DPRINT_ERR(STORVSC_DRV,
741                                            "unable to create bounce buffer for "
742                                            "this scmnd %p", scmnd);
743
744                                 scmnd->scsi_done = NULL;
745                                 scmnd->host_scribble = NULL;
746                                 kmem_cache_free(host_device_ctx->request_pool,
747                                                 cmd_request);
748
749                                 return SCSI_MLQUEUE_HOST_BUSY;
750                         }
751
752                         cmd_request->bounce_sgl_count =
753                                 ALIGN_UP(scsi_bufflen(scmnd), PAGE_SIZE) >>
754                                         PAGE_SHIFT;
755
756                         /*
757                          * FIXME: We can optimize on reads by just skipping
758                          * this
759                          */
760                         copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
761                                               scsi_sg_count(scmnd));
762
763                         sgl = cmd_request->bounce_sgl;
764                 }
765
766                 request->DataBuffer.Offset = sgl[0].offset;
767
768                 for (i = 0; i < scsi_sg_count(scmnd); i++) {
769                         DPRINT_DBG(STORVSC_DRV, "sgl[%d] len %d offset %d \n",
770                                    i, sgl[i].length, sgl[i].offset);
771                         request->DataBuffer.PfnArray[i] =
772                                         page_to_pfn(sg_page((&sgl[i])));
773                 }
774         } else if (scsi_sglist(scmnd)) {
775                 ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE);
776                 request->DataBuffer.Offset =
777                         virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
778                 request->DataBuffer.PfnArray[0] =
779                         virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
780         } else {
781                 ASSERT(scsi_bufflen(scmnd) == 0);
782         }
783
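        /*
         * Retried commands (host_scribble already set) jump straight here
         * with the previously built request; first-time commands fall
         * through after setup above.
         */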
784 retry_request:
785         /* Invokes the vsc to start an IO */
786         ret = storvsc_drv_obj->OnIORequest(&device_ctx->device_obj,
787                                            &cmd_request->request);
788         if (ret == -1) {
789                 /* no more space */
790                 DPRINT_ERR(STORVSC_DRV,
791                            "scmnd (%p) - queue FULL...marking queue busy",
792                            scmnd);
793
794                 if (cmd_request->bounce_sgl_count) {
795                         /*
796                          * FIXME: We can optimize on writes by just skipping
797                          * this
798                          */
799                         copy_from_bounce_buffer(scsi_sglist(scmnd),
800                                                 cmd_request->bounce_sgl,
801                                                 scsi_sg_count(scmnd));
802                         destroy_bounce_buffer(cmd_request->bounce_sgl,
803                                               cmd_request->bounce_sgl_count);
804                 }
805
806                 kmem_cache_free(host_device_ctx->request_pool, cmd_request);
807
808                 scmnd->scsi_done = NULL;
809                 scmnd->host_scribble = NULL;
810
811                 ret = SCSI_MLQUEUE_DEVICE_BUSY;
812         }
813
814         DPRINT_EXIT(STORVSC_DRV);
815
816         return ret;
817 }
818
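/*
 * Accept every bio_vec unmodified; checking is left to the caller,
 * presumably because segment sizing is already constrained by the PAGE_SIZE
 * max_segment_size and dma_boundary configured elsewhere in this driver.
 */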
819 static int storvsc_merge_bvec(struct request_queue *q,
820                               struct bvec_merge_data *bmd, struct bio_vec *bvec)
821 {
822         /* checking done by caller. */
823         return bvec->bv_len;
824 }
825
826 /**
827  * storvsc_device_alloc - Set device flags ahead of the LUN scan
828  */
829 static int storvsc_device_alloc(struct scsi_device *sdevice)
830 {
831         DPRINT_DBG(STORVSC_DRV, "sdev (%p) - setting device flag to %d",
832                    sdevice, BLIST_SPARSELUN);
833         /*
834          * This enables LUNs to be located sparsely. Otherwise, we may not
835          * discover them.
836          */
837         sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
838         return 0;
839 }
840
841 static int storvsc_device_configure(struct scsi_device *sdevice)
842 {
843         DPRINT_INFO(STORVSC_DRV, "sdev (%p) - curr queue depth %d", sdevice,
844                     sdevice->queue_depth);
845
846         DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting queue depth to %d",
847                     sdevice, STORVSC_MAX_IO_REQUESTS);
848         scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
849                                 STORVSC_MAX_IO_REQUESTS);
850
851         DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
852                     sdevice, PAGE_SIZE);
853         blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);
854
855         DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
856                     sdevice);
857         blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);
858
859         blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
860         /* sdevice->timeout = (2000 * HZ);//(75 * HZ); */
861
862         return 0;
863 }
864
865 /**
866  * storvsc_host_reset_handler - Reset the scsi HBA
867  */
868 static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
869 {
870         int ret;
871         struct host_device_context *host_device_ctx =
872                 (struct host_device_context *)scmnd->device->host->hostdata;
873         struct vm_device *device_ctx = host_device_ctx->device_ctx;
874
875         DPRINT_ENTER(STORVSC_DRV);
876
877         DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
878                     scmnd->device, &device_ctx->device_obj);
879
880         /* Invokes the vsc to reset the host/bus */
881         ret = StorVscOnHostReset(&device_ctx->device_obj);
882         if (ret != 0) {
883                 DPRINT_EXIT(STORVSC_DRV);
884                 return ret;
885         }
886
887         DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reset",
888                     scmnd->device, &device_ctx->device_obj);
889
890         DPRINT_EXIT(STORVSC_DRV);
891
892         return ret;
893 }
894
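/*
 * bios_param callback: fabricate a CHS geometry for partitioning tools.
 * Large disks get the usual 255 sectors/track and 16 heads; smaller disks
 * go through the classic heads/sectors-per-track search so the cylinder
 * count stays within bounds. sector_div() is used throughout because
 * sector_t may be 64 bits wide.
 */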
895 static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
896                            sector_t capacity, int *info)
897 {
898         sector_t total_sectors = capacity;
899         sector_t cylinder_times_heads = 0;
900         sector_t temp = 0;
901
902         int sectors_per_track = 0;
903         int heads = 0;
904         int cylinders = 0;
905         int rem = 0;
906
907         if (total_sectors > (65535 * 16 * 255))
908                 total_sectors = (65535 * 16 * 255);
909
910         if (total_sectors >= (65535 * 16 * 63)) {
911                 sectors_per_track = 255;
912                 heads = 16;
913
914                 cylinder_times_heads = total_sectors;
915                 /* sector_div stores the quotient in cylinder_times_heads */
916                 rem = sector_div(cylinder_times_heads, sectors_per_track);
917         } else {
918                 sectors_per_track = 17;
919
920                 cylinder_times_heads = total_sectors;
921                 /* sector_div stores the quotient in cylinder_times_heads */
922                 rem = sector_div(cylinder_times_heads, sectors_per_track);
923
924                 temp = cylinder_times_heads + 1023;
925                 /* sector_div stores the quotient in temp */
926                 rem = sector_div(temp, 1024);
927
928                 heads = temp;
929
930                 if (heads < 4)
931                         heads = 4;
932
933                 if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
934                         sectors_per_track = 31;
935                         heads = 16;
936
937                         cylinder_times_heads = total_sectors;
938                         /*
939                          * sector_div stores the quotient in
940                          * cylinder_times_heads
941                          */
942                         rem = sector_div(cylinder_times_heads,
943                                          sectors_per_track);
944                 }
945
946                 if (cylinder_times_heads >= (heads * 1024)) {
947                         sectors_per_track = 63;
948                         heads = 16;
949
950                         cylinder_times_heads = total_sectors;
951                         /*
952                          * sector_div stores the quotient in
953                          * cylinder_times_heads
954                          */
955                         rem = sector_div(cylinder_times_heads,
956                                          sectors_per_track);
957                 }
958         }
959
960         temp = cylinder_times_heads;
961         /* sector_div stores the quotient in temp */
962         rem = sector_div(temp, heads);
963         cylinders = temp;
964
965         info[0] = heads;
966         info[1] = sectors_per_track;
967         info[2] = cylinders;
968
969         DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
970                     sectors_per_track);
971
972         return 0;
973 }
974
975 static int __init storvsc_init(void)
976 {
977         int ret;
978
979         DPRINT_ENTER(STORVSC_DRV);
980         DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
981         ret = storvsc_drv_init(StorVscInitialize);
982         DPRINT_EXIT(STORVSC_DRV);
983         return ret;
984 }
985
986 static void __exit storvsc_exit(void)
987 {
988         DPRINT_ENTER(STORVSC_DRV);
989         storvsc_drv_exit();
990         DPRINT_EXIT(STORVSC_DRV);
991 }
992
993 MODULE_LICENSE("GPL");
994 MODULE_VERSION(HV_DRV_VERSION);
995 module_param(storvsc_ringbuffer_size, int, S_IRUGO);
996 module_init(storvsc_init);
997 module_exit(storvsc_exit);