/* Source: drivers/scsi/scsi_tgt_if.c (net-next-2.6 tree, git blame view) */
1/*
2 * SCSI target kernel/user interface functions
3 *
4 * Copyright (C) 2005 FUJITA Tomonori <tomof@acm.org>
5 * Copyright (C) 2005 Mike Christie <michaelc@cs.wisc.edu>
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of the
10 * License, or (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 */
22#include <linux/miscdevice.h>
23#include <linux/file.h>
24#include <net/tcp.h>
25#include <scsi/scsi.h>
26#include <scsi/scsi_cmnd.h>
27#include <scsi/scsi_device.h>
28#include <scsi/scsi_host.h>
29#include <scsi/scsi_tgt.h>
30#include <scsi/scsi_tgt_if.h>
31
32#include "scsi_tgt_priv.h"
33
34struct tgt_ring {
35 u32 tr_idx;
36 unsigned long tr_pages[TGT_RING_PAGES];
37 spinlock_t tr_lock;
38};
39
40/* tx_ring : kernel->user, rx_ring : user->kernel */
41static struct tgt_ring tx_ring, rx_ring;
42static DECLARE_WAIT_QUEUE_HEAD(tgt_poll_wait);
43
44static inline void tgt_ring_idx_inc(struct tgt_ring *ring)
45{
46 if (ring->tr_idx == TGT_MAX_EVENTS - 1)
47 ring->tr_idx = 0;
48 else
49 ring->tr_idx++;
50}
51
52static struct tgt_event *tgt_head_event(struct tgt_ring *ring, u32 idx)
53{
54 u32 pidx, off;
55
56 pidx = idx / TGT_EVENT_PER_PAGE;
57 off = idx % TGT_EVENT_PER_PAGE;
58
59 return (struct tgt_event *)
60 (ring->tr_pages[pidx] + sizeof(struct tgt_event) * off);
61}
62
63static int tgt_uspace_send_event(u32 type, struct tgt_event *p)
64{
65 struct tgt_event *ev;
66 struct tgt_ring *ring = &tx_ring;
67 unsigned long flags;
68 int err = 0;
69
70 spin_lock_irqsave(&ring->tr_lock, flags);
71
72 ev = tgt_head_event(ring, ring->tr_idx);
73 if (!ev->hdr.status)
74 tgt_ring_idx_inc(ring);
75 else
76 err = -BUSY;
77
78 spin_unlock_irqrestore(&ring->tr_lock, flags);
79
80 if (err)
81 return err;
82
83 memcpy(ev, p, sizeof(*ev));
84 ev->hdr.type = type;
85 mb();
86 ev->hdr.status = 1;
87
88 flush_dcache_page(virt_to_page(ev));
89
90 wake_up_interruptible(&tgt_poll_wait);
91
92 return 0;
93}
94
95int scsi_tgt_uspace_send_cmd(struct scsi_cmnd *cmd, struct scsi_lun *lun, u64 tag)
96{
97 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
98 struct tgt_event ev;
99 int err;
100
101 memset(&ev, 0, sizeof(ev));
102 ev.p.cmd_req.host_no = shost->host_no;
103 ev.p.cmd_req.data_len = cmd->request_bufflen;
104 memcpy(ev.p.cmd_req.scb, cmd->cmnd, sizeof(ev.p.cmd_req.scb));
105 memcpy(ev.p.cmd_req.lun, lun, sizeof(ev.p.cmd_req.lun));
106 ev.p.cmd_req.attribute = cmd->tag;
107 ev.p.cmd_req.tag = tag;
108
109 dprintk("%p %d %u %x %llx\n", cmd, shost->host_no,
110 ev.p.cmd_req.data_len, cmd->tag,
111 (unsigned long long) ev.p.cmd_req.tag);
112
113 err = tgt_uspace_send_event(TGT_KEVENT_CMD_REQ, &ev);
114 if (err)
115 eprintk("tx buf is full, could not send\n");
116
117 return err;
118}
119
120int scsi_tgt_uspace_send_status(struct scsi_cmnd *cmd, u64 tag)
121{
122 struct Scsi_Host *shost = scsi_tgt_cmd_to_host(cmd);
123 struct tgt_event ev;
124 int err;
125
126 memset(&ev, 0, sizeof(ev));
127 ev.p.cmd_done.host_no = shost->host_no;
128 ev.p.cmd_done.tag = tag;
129 ev.p.cmd_done.result = cmd->result;
130
131 dprintk("%p %d %llu %u %x\n", cmd, shost->host_no,
132 (unsigned long long) ev.p.cmd_req.tag,
133 ev.p.cmd_req.data_len, cmd->tag);
134
135 err = tgt_uspace_send_event(TGT_KEVENT_CMD_DONE, &ev);
136 if (err)
137 eprintk("tx buf is full, could not send\n");
138
139 return err;
140}
141
142int scsi_tgt_uspace_send_tsk_mgmt(int host_no, int function, u64 tag,
143 struct scsi_lun *scsilun, void *data)
144{
145 struct tgt_event ev;
146 int err;
147
148 memset(&ev, 0, sizeof(ev));
149 ev.p.tsk_mgmt_req.host_no = host_no;
150 ev.p.tsk_mgmt_req.function = function;
151 ev.p.tsk_mgmt_req.tag = tag;
152 memcpy(ev.p.tsk_mgmt_req.lun, scsilun, sizeof(ev.p.tsk_mgmt_req.lun));
153 ev.p.tsk_mgmt_req.mid = (u64) (unsigned long) data;
154
155 dprintk("%d %x %llx %llx\n", host_no, function, (unsigned long long) tag,
156 (unsigned long long) ev.p.tsk_mgmt_req.mid);
157
158 err = tgt_uspace_send_event(TGT_KEVENT_TSK_MGMT_REQ, &ev);
159 if (err)
160 eprintk("tx buf is full, could not send\n");
161
162 return err;
163}
164
165static int event_recv_msg(struct tgt_event *ev)
166{
167 int err = 0;
168
169 switch (ev->hdr.type) {
170 case TGT_UEVENT_CMD_RSP:
171 err = scsi_tgt_kspace_exec(ev->p.cmd_rsp.host_no,
172 ev->p.cmd_rsp.tag,
173 ev->p.cmd_rsp.result,
174 ev->p.cmd_rsp.len,
175 ev->p.cmd_rsp.uaddr,
176 ev->p.cmd_rsp.rw);
177 break;
178 case TGT_UEVENT_TSK_MGMT_RSP:
179 err = scsi_tgt_kspace_tsk_mgmt(ev->p.tsk_mgmt_rsp.host_no,
180 ev->p.tsk_mgmt_rsp.mid,
181 ev->p.tsk_mgmt_rsp.result);
182 break;
183 default:
184 eprintk("unknown type %d\n", ev->hdr.type);
185 err = -EINVAL;
186 }
187
188 return err;
189}
190
191static ssize_t tgt_write(struct file *file, const char __user * buffer,
192 size_t count, loff_t * ppos)
193{
194 struct tgt_event *ev;
195 struct tgt_ring *ring = &rx_ring;
196
197 while (1) {
198 ev = tgt_head_event(ring, ring->tr_idx);
199 /* do we need this? */
200 flush_dcache_page(virt_to_page(ev));
201
202 if (!ev->hdr.status)
203 break;
204
205 tgt_ring_idx_inc(ring);
206 event_recv_msg(ev);
207 ev->hdr.status = 0;
208 };
209
210 return count;
211}
212
213static unsigned int tgt_poll(struct file * file, struct poll_table_struct *wait)
214{
215 struct tgt_event *ev;
216 struct tgt_ring *ring = &tx_ring;
217 unsigned long flags;
218 unsigned int mask = 0;
219 u32 idx;
220
221 poll_wait(file, &tgt_poll_wait, wait);
222
223 spin_lock_irqsave(&ring->tr_lock, flags);
224
225 idx = ring->tr_idx ? ring->tr_idx - 1 : TGT_MAX_EVENTS - 1;
226 ev = tgt_head_event(ring, idx);
227 if (ev->hdr.status)
228 mask |= POLLIN | POLLRDNORM;
229
230 spin_unlock_irqrestore(&ring->tr_lock, flags);
231
232 return mask;
233}
234
235static int uspace_ring_map(struct vm_area_struct *vma, unsigned long addr,
236 struct tgt_ring *ring)
237{
238 int i, err;
239
240 for (i = 0; i < TGT_RING_PAGES; i++) {
241 struct page *page = virt_to_page(ring->tr_pages[i]);
242 err = vm_insert_page(vma, addr, page);
243 if (err)
244 return err;
245 addr += PAGE_SIZE;
246 }
247
248 return 0;
249}
250
251static int tgt_mmap(struct file *filp, struct vm_area_struct *vma)
252{
253 unsigned long addr;
254 int err;
255
256 if (vma->vm_pgoff)
257 return -EINVAL;
258
259 if (vma->vm_end - vma->vm_start != TGT_RING_SIZE * 2) {
260 eprintk("mmap size must be %lu, not %lu \n",
261 TGT_RING_SIZE * 2, vma->vm_end - vma->vm_start);
262 return -EINVAL;
263 }
264
265 addr = vma->vm_start;
266 err = uspace_ring_map(vma, addr, &tx_ring);
267 if (err)
268 return err;
269 err = uspace_ring_map(vma, addr + TGT_RING_SIZE, &rx_ring);
270
271 return err;
272}
273
274static int tgt_open(struct inode *inode, struct file *file)
275{
276 tx_ring.tr_idx = rx_ring.tr_idx = 0;
277
278 return 0;
279}
280
281static struct file_operations tgt_fops = {
282 .owner = THIS_MODULE,
283 .open = tgt_open,
284 .poll = tgt_poll,
285 .write = tgt_write,
286 .mmap = tgt_mmap,
287};
288
289static struct miscdevice tgt_miscdev = {
290 .minor = MISC_DYNAMIC_MINOR,
291 .name = "tgt",
292 .fops = &tgt_fops,
293};
294
295static void tgt_ring_exit(struct tgt_ring *ring)
296{
297 int i;
298
299 for (i = 0; i < TGT_RING_PAGES; i++)
300 free_page(ring->tr_pages[i]);
301}
302
303static int tgt_ring_init(struct tgt_ring *ring)
304{
305 int i;
306
307 spin_lock_init(&ring->tr_lock);
308
309 for (i = 0; i < TGT_RING_PAGES; i++) {
310 ring->tr_pages[i] = get_zeroed_page(GFP_KERNEL);
311 if (!ring->tr_pages[i]) {
312 eprintk("out of memory\n");
313 return -ENOMEM;
314 }
315 }
316
317 return 0;
318}
319
320void scsi_tgt_if_exit(void)
321{
322 tgt_ring_exit(&tx_ring);
323 tgt_ring_exit(&rx_ring);
324 misc_deregister(&tgt_miscdev);
325}
326
327int scsi_tgt_if_init(void)
328{
329 int err;
330
331 err = tgt_ring_init(&tx_ring);
332 if (err)
333 return err;
334
335 err = tgt_ring_init(&rx_ring);
336 if (err)
337 goto free_tx_ring;
338
339 err = misc_register(&tgt_miscdev);
340 if (err)
341 goto free_rx_ring;
342
343 return 0;
344free_rx_ring:
345 tgt_ring_exit(&rx_ring);
346free_tx_ring:
347 tgt_ring_exit(&tx_ring);
348
349 return err;
350}