/* ------------------------------------------------------------
 * rpa_vscsi.c
 * (C) Copyright IBM Corporation 1994, 2003
 * Authors: Colin DeVilbiss (devilbis@us.ibm.com)
 *          Santiago Leon (santil@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 * ------------------------------------------------------------
 * RPA-specific functions of the SCSI host adapter for Virtual I/O devices
 *
 * This driver allows the Linux SCSI peripheral drivers to directly
 * access devices in the hosting partition, either on an iSeries
 * hypervisor system or a converged hypervisor system.
 */

#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/hvcall.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>
#include "ibmvscsi.h"

static char partition_name[97] = "UNKNOWN";
static unsigned int partition_number = -1;

/* ------------------------------------------------------------
 * Routines for managing the command/response queue
 */
/**
 * rpavscsi_handle_event: - Interrupt handler for crq events
 * @irq:        number of irq to handle, not used
 * @dev_instance: ibmvscsi_host_data of host that received interrupt
 *
 * Disables interrupts and schedules srp_task
 * Always returns IRQ_HANDLED
 */
static irqreturn_t rpavscsi_handle_event(int irq, void *dev_instance)
{
        struct ibmvscsi_host_data *hostdata =
            (struct ibmvscsi_host_data *)dev_instance;
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return IRQ_HANDLED;
}

/**
 * rpavscsi_release_crq_queue: - Deallocates data and unregisters CRQ
 * @queue:      crq_queue to deallocate and unregister
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests: unused
 *
 * Frees irq, deallocates a page for messages, unmaps dma, and unregisters
 * the crq with the hypervisor.
 */
static void rpavscsi_release_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata,
                                       int max_requests)
{
        long rc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        free_irq(vdev->irq, (void *)hostdata);
        tasklet_kill(&hostdata->srp_task);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
        free_page((unsigned long)queue->msgs);
}

/**
 * crq_queue_next_crq: - Returns the next entry in message queue
 * @queue:      crq_queue to use
 *
 * Returns pointer to next entry in queue, or NULL if there are no new
 * entries in the CRQ.
 */
static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
{
        struct viosrp_crq *crq;
        unsigned long flags;

        spin_lock_irqsave(&queue->lock, flags);
        crq = &queue->msgs[queue->cur];
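        /*
         * A CRQ entry is live while the top bit of its 'valid' byte is set;
         * rpavscsi_task() clears the byte again once the entry is consumed.
         */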
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);

        return crq;
}

/**
 * rpavscsi_send_crq: - Send a CRQ
 * @hostdata:   the adapter
 * @word1:      the first 64 bits of the data
 * @word2:      the second 64 bits of the data
 */
static int rpavscsi_send_crq(struct ibmvscsi_host_data *hostdata,
                             u64 word1, u64 word2)
{
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
}

/**
 * rpavscsi_task: - Process srps asynchronously
 * @data:       ibmvscsi_host_data of host
 */
static void rpavscsi_task(void *data)
{
        struct ibmvscsi_host_data *hostdata = (struct ibmvscsi_host_data *)data;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
        struct viosrp_crq *crq;
        int done = 0;

        while (!done) {
                /* Pull all the valid messages off the CRQ */
                while ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                }

                vio_enable_interrupts(vdev);
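                /*
                 * Re-check after re-enabling interrupts: an entry that
                 * arrived between the drain loop above and the enable call
                 * would otherwise sit unprocessed until the next interrupt.
                 */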
                if ((crq = crq_queue_next_crq(&hostdata->queue)) != NULL) {
                        vio_disable_interrupts(vdev);
                        ibmvscsi_handle_crq(crq, hostdata);
                        crq->valid = 0x00;
                } else {
                        done = 1;
                }
        }
}

static void gather_partition_info(void)
{
        struct device_node *rootdn;

        const char *ppartition_name;
        const unsigned int *p_number_ptr;

        /* Retrieve information about this partition */
        rootdn = of_find_node_by_path("/");
        if (!rootdn) {
                return;
        }

        ppartition_name = of_get_property(rootdn, "ibm,partition-name", NULL);
        if (ppartition_name)
                strncpy(partition_name, ppartition_name,
                                sizeof(partition_name));
        p_number_ptr = of_get_property(rootdn, "ibm,partition-no", NULL);
        if (p_number_ptr)
                partition_number = *p_number_ptr;
        of_node_put(rootdn);
}

static void set_adapter_info(struct ibmvscsi_host_data *hostdata)
{
        memset(&hostdata->madapter_info, 0x00,
                        sizeof(hostdata->madapter_info));

        dev_info(hostdata->dev, "SRP_VERSION: %s\n", SRP_VERSION);
        strcpy(hostdata->madapter_info.srp_version, SRP_VERSION);

        strncpy(hostdata->madapter_info.partition_name, partition_name,
                        sizeof(hostdata->madapter_info.partition_name));

        hostdata->madapter_info.partition_number = partition_number;

        hostdata->madapter_info.mad_version = 1;
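        /* os_type 2 identifies the client OS as Linux to the virtual I/O server */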
        hostdata->madapter_info.os_type = 2;
}

/**
 * rpavscsi_reset_crq_queue: - resets a crq after a failure
 * @queue:      crq_queue to reset
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_reset_crq_queue(struct crq_queue *queue,
                                    struct ibmvscsi_host_data *hostdata)
{
        int rc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Close the CRQ */
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        /* Clean out the queue */
        memset(queue->msgs, 0x00, PAGE_SIZE);
        queue->cur = 0;

        set_adapter_info(hostdata);

        /* And re-open it again */
        rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
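        /* An rc of 2 (H_CLOSED) means registration worked but the partner queue is not open yet */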
        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "couldn't register crq--rc 0x%x\n", rc);
        }
        return rc;
}

/**
 * rpavscsi_init_crq_queue: - Initializes and registers CRQ with hypervisor
 * @queue:      crq_queue to initialize and register
 * @hostdata:   ibmvscsi_host_data of host
 * @max_requests: maximum number of requests (unused)
 *
 * Allocates a page for messages, maps it for dma, and registers
 * the crq with the hypervisor.
 * Returns zero on success.
 */
static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                                   struct ibmvscsi_host_data *hostdata,
                                   int max_requests)
{
        int rc;
        int retrc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        queue->msgs = (struct viosrp_crq *)get_zeroed_page(GFP_KERNEL);

        if (!queue->msgs)
                goto malloc_failed;
        queue->size = PAGE_SIZE / sizeof(*queue->msgs);

        queue->msg_token = dma_map_single(hostdata->dev, queue->msgs,
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);

        if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;

        gather_partition_info();
        set_adapter_info(hostdata);

        retrc = rc = plpar_hcall_norets(H_REG_CRQ,
                                vdev->unit_address,
                                queue->msg_token, PAGE_SIZE);
        if (rc == H_RESOURCE)
                /* maybe kexecing and resource is busy. try a reset */
                rc = rpavscsi_reset_crq_queue(queue,
                                              hostdata);

        if (rc == 2) {
                /* Adapter is good, but other end is not ready */
                dev_warn(hostdata->dev, "Partner adapter not ready\n");
                retrc = 0;
        } else if (rc != 0) {
                dev_warn(hostdata->dev, "Error %d opening adapter\n", rc);
                goto reg_crq_failed;
        }

        queue->cur = 0;
        spin_lock_init(&queue->lock);

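        /*
         * Initialise the tasklet before requesting the irq: an interrupt
         * already pending at probe time would otherwise schedule an
         * uninitialised tasklet and oops.
         */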
        tasklet_init(&hostdata->srp_task, (void *)rpavscsi_task,
                     (unsigned long)hostdata);

        if (request_irq(vdev->irq,
                        rpavscsi_handle_event,
                        0, "ibmvscsi", (void *)hostdata) != 0) {
                dev_err(hostdata->dev, "couldn't register irq 0x%x\n",
                        vdev->irq);
                goto req_irq_failed;
        }

        rc = vio_enable_interrupts(vdev);
        if (rc != 0) {
                dev_err(hostdata->dev, "Error %d enabling interrupts!!!\n", rc);
                goto req_irq_failed;
        }

        return retrc;

      req_irq_failed:
        tasklet_kill(&hostdata->srp_task);
        do {
                rc = plpar_hcall_norets(H_FREE_CRQ, vdev->unit_address);
        } while ((rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));
      reg_crq_failed:
        dma_unmap_single(hostdata->dev,
                         queue->msg_token,
                         queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);
      map_failed:
        free_page((unsigned long)queue->msgs);
      malloc_failed:
        return -1;
}

/**
 * rpavscsi_reenable_crq_queue: - re-enables a crq after it has been disabled
 * @queue:      crq_queue to re-enable
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_reenable_crq_queue(struct crq_queue *queue,
                                       struct ibmvscsi_host_data *hostdata)
{
        int rc;
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);

        /* Re-enable the CRQ */
        do {
                rc = plpar_hcall_norets(H_ENABLE_CRQ, vdev->unit_address);
        } while ((rc == H_IN_PROGRESS) || (rc == H_BUSY) || (H_IS_LONG_BUSY(rc)));

        if (rc)
                dev_err(hostdata->dev, "Error %d enabling adapter\n", rc);
        return rc;
}

/**
 * rpavscsi_resume: - resume after suspend
 * @hostdata:   ibmvscsi_host_data of host
 *
 */
static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
{
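        /* Kick the tasklet so any CRQ entries queued while suspended get drained */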
        vio_disable_interrupts(to_vio_dev(hostdata->dev));
        tasklet_schedule(&hostdata->srp_task);
        return 0;
}

struct ibmvscsi_ops rpavscsi_ops = {
        .init_crq_queue = rpavscsi_init_crq_queue,
        .release_crq_queue = rpavscsi_release_crq_queue,
        .reset_crq_queue = rpavscsi_reset_crq_queue,
        .reenable_crq_queue = rpavscsi_reenable_crq_queue,
        .send_crq = rpavscsi_send_crq,
        .resume = rpavscsi_resume,
};