/*
 * cxgb3i_pdu.c: Chelsio S3xx iSCSI driver.
 *
 * Copyright (c) 2008 Chelsio Communications, Inc.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Karen Xie (kxie@chelsio.com)
 */

#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

#include "cxgb3i.h"
#include "cxgb3i_pdu.h"

#ifdef __DEBUG_CXGB3I_RX__
#define cxgb3i_rx_debug		cxgb3i_log_debug
#else
#define cxgb3i_rx_debug(fmt...)
#endif

#ifdef __DEBUG_CXGB3I_TX__
#define cxgb3i_tx_debug		cxgb3i_log_debug
#else
#define cxgb3i_tx_debug(fmt...)
#endif

/* always allocate room for AHS */
#define SKB_TX_PDU_HEADER_LEN	\
	(sizeof(struct iscsi_hdr) + ISCSI_MAX_AHS_SIZE)
static unsigned int skb_extra_headroom;
static struct page *pad_page;

/*
 * pdu receive, interact with libiscsi_tcp
 */
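/*
 * read_pdu_skb - feed one rx skb to libiscsi_tcp
 * Returns the number of bytes consumed on success, or a negative errno
 * if libiscsi_tcp reports an error while parsing the pdu.
 */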
static inline int read_pdu_skb(struct iscsi_conn *conn, struct sk_buff *skb,
			       unsigned int offset, int offloaded)
{
	int status = 0;
	int bytes_read;

	bytes_read = iscsi_tcp_recv_skb(conn, skb, offset, offloaded, &status);
	switch (status) {
	case ISCSI_TCP_CONN_ERR:
		return -EIO;
	case ISCSI_TCP_SUSPENDED:
		/* no transfer - just have caller flush queue */
		return bytes_read;
	case ISCSI_TCP_SKB_DONE:
		/*
		 * pdus should always fit in the skb and we should get
		 * segment done notification.
		 */
		iscsi_conn_printk(KERN_ERR, conn, "Invalid pdu or skb.");
		return -EFAULT;
	case ISCSI_TCP_SEGMENT_DONE:
		return bytes_read;
	default:
		iscsi_conn_printk(KERN_ERR, conn, "Invalid iscsi_tcp_recv_skb "
				  "status %d\n", status);
		return -EINVAL;
	}
}

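/*
 * cxgb3i_conn_read_pdu_skb - process one rx skb carrying a complete pdu
 * Check the ULP flags for digest errors, then pass the iscsi header and,
 * unless the payload was ddp'ed by the hardware, the data segment to
 * libiscsi_tcp.
 */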
static int cxgb3i_conn_read_pdu_skb(struct iscsi_conn *conn,
				    struct sk_buff *skb)
{
	struct iscsi_tcp_conn *tcp_conn = conn->dd_data;
	bool offloaded = false;
	unsigned int offset;
	int rc;

	cxgb3i_rx_debug("conn 0x%p, skb 0x%p, len %u, flag 0x%x.\n",
			conn, skb, skb->len, skb_ulp_mode(skb));

	if (!iscsi_tcp_recv_segment_is_hdr(tcp_conn)) {
		iscsi_conn_failure(conn, ISCSI_ERR_PROTO);
		return -EIO;
	}

	if (conn->hdrdgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_HCRC_ERROR)) {
		iscsi_conn_failure(conn, ISCSI_ERR_HDR_DGST);
		return -EIO;
	}

	if (conn->datadgst_en && (skb_ulp_mode(skb) & ULP2_FLAG_DCRC_ERROR)) {
		iscsi_conn_failure(conn, ISCSI_ERR_DATA_DGST);
		return -EIO;
	}

	/* iscsi hdr */
	rc = read_pdu_skb(conn, skb, 0, 0);
	if (rc <= 0)
		return rc;

	if (iscsi_tcp_recv_segment_is_hdr(tcp_conn))
		return 0;

	offset = rc;
	if (conn->hdrdgst_en)
		offset += ISCSI_DIGEST_SIZE;

	/* iscsi data */
	if (skb_ulp_mode(skb) & ULP2_FLAG_DATA_DDPED) {
		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, ddp'ed, "
				"itt 0x%x.\n",
				skb,
				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
				tcp_conn->in.datalen,
				ntohl(tcp_conn->in.hdr->itt));
		offloaded = true;
	} else {
		cxgb3i_rx_debug("skb 0x%p, opcode 0x%x, data %u, NOT ddp'ed, "
				"itt 0x%x.\n",
				skb,
				tcp_conn->in.hdr->opcode & ISCSI_OPCODE_MASK,
				tcp_conn->in.datalen,
				ntohl(tcp_conn->in.hdr->itt));
		offset += sizeof(struct cpl_iscsi_hdr_norss);
	}

	rc = read_pdu_skb(conn, skb, offset, offloaded);
	if (rc < 0)
		return rc;
	else
		return 0;
}

/*
 * pdu transmit, interact with libiscsi_tcp
 */
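/* encode the ULP mode and header/data digest submode into the tx skb */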
static inline void tx_skb_setmode(struct sk_buff *skb, int hcrc, int dcrc)
{
	u8 submode = 0;

	if (hcrc)
		submode |= 1;
	if (dcrc)
		submode |= 2;
	skb_ulp_mode(skb) = (ULP_MODE_ISCSI << 4) | submode;
}

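/*
 * cxgb3i_conn_cleanup_task - free the tx skb if the pdu was never
 * transmitted and release the ddp tag before handing the task back to
 * libiscsi_tcp.
 */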
void cxgb3i_conn_cleanup_task(struct iscsi_task *task)
{
	struct cxgb3i_task_data *tdata = task->dd_data +
				sizeof(struct iscsi_tcp_task);

	/* never reached the xmit task callout */
	if (tdata->skb)
		__kfree_skb(tdata->skb);
	memset(tdata, 0, sizeof(struct cxgb3i_task_data));

	/* MNC - Do we need a check in case this is called but
	 * cxgb3i_conn_alloc_pdu has never been called on the task? */
	cxgb3i_release_itt(task, task->hdr_itt);
	iscsi_tcp_cleanup_task(task);
}

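/*
 * sgl_seek_offset - locate the scatterlist entry covering @offset
 * Returns 0 with *sgp set to the entry and *off to the offset within it,
 * or -EFAULT if @offset lies beyond the end of the list.
 */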
static int sgl_seek_offset(struct scatterlist *sgl, unsigned int sgcnt,
			   unsigned int offset, unsigned int *off,
			   struct scatterlist **sgp)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, sgcnt, i) {
		if (offset < sg->length) {
			*off = offset;
			*sgp = sg;
			return 0;
		}
		offset -= sg->length;
	}
	return -EFAULT;
}

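/*
 * sgl_read_to_frags - map @dlen bytes starting at @sgoffset within @sg
 * into an array of page fragments, merging ranges that are contiguous
 * within the same page.  Returns the number of fragments used, or
 * -EINVAL if the data needs more than @frag_max fragments or runs past
 * the end of the scatterlist.
 */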
static int sgl_read_to_frags(struct scatterlist *sg, unsigned int sgoffset,
			     unsigned int dlen, skb_frag_t *frags,
			     int frag_max)
{
	unsigned int datalen = dlen;
	unsigned int sglen = sg->length - sgoffset;
	struct page *page = sg_page(sg);
	int i;

	i = 0;
	do {
		unsigned int copy;

		if (!sglen) {
			sg = sg_next(sg);
			if (!sg) {
				cxgb3i_log_error("%s, sg NULL, len %u/%u.\n",
						 __func__, datalen, dlen);
				return -EINVAL;
			}
			sgoffset = 0;
			sglen = sg->length;
			page = sg_page(sg);
		}
		copy = min(datalen, sglen);
		if (i && page == frags[i - 1].page &&
		    sgoffset + sg->offset ==
			frags[i - 1].page_offset + frags[i - 1].size) {
			frags[i - 1].size += copy;
		} else {
			if (i >= frag_max) {
				cxgb3i_log_error("%s, too many pages %u, "
						 "dlen %u.\n", __func__,
						 frag_max, dlen);
				return -EINVAL;
			}

			frags[i].page = page;
			frags[i].page_offset = sg->offset + sgoffset;
			frags[i].size = copy;
			i++;
		}
		datalen -= copy;
		sgoffset += copy;
		sglen -= copy;
	} while (datalen);

	return i;
}

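/*
 * cxgb3i_conn_alloc_pdu - allocate the tx skb for a task.  The headroom
 * always covers the iscsi header plus AHS; for write commands extra
 * headroom may be reserved so init_pdu can copy the payload inline.
 */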
int cxgb3i_conn_alloc_pdu(struct iscsi_task *task, u8 opcode)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = task->dd_data + sizeof(*tcp_task);
	struct scsi_cmnd *sc = task->sc;
	int headroom = SKB_TX_PDU_HEADER_LEN;

	tcp_task->dd_data = tdata;
	task->hdr = NULL;

	/* write command, need to send data pdus */
	if (skb_extra_headroom && (opcode == ISCSI_OP_SCSI_DATA_OUT ||
	    (opcode == ISCSI_OP_SCSI_CMD &&
	     (scsi_bidi_cmnd(sc) || sc->sc_data_direction == DMA_TO_DEVICE))))
		headroom += min(skb_extra_headroom, conn->max_xmit_dlength);

	tdata->skb = alloc_skb(TX_HEADER_LEN + headroom, GFP_ATOMIC);
	if (!tdata->skb)
		return -ENOMEM;
	skb_reserve(tdata->skb, TX_HEADER_LEN);

	cxgb3i_tx_debug("task 0x%p, opcode 0x%x, skb 0x%p.\n",
			task, opcode, tdata->skb);

	task->hdr = (struct iscsi_hdr *)tdata->skb->data;
	task->hdr_max = SKB_TX_PDU_HEADER_LEN;

	/* data_out uses scsi_cmd's itt */
	if (opcode != ISCSI_OP_SCSI_DATA_OUT)
		cxgb3i_reserve_itt(task, &task->hdr->itt);

	return 0;
}

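/*
 * cxgb3i_conn_init_pdu - attach the pdu payload to the tx skb, either by
 * copying it into the skb headroom or by referencing the scatterlist
 * pages as frags, and append the shared zero page for any pdu padding.
 */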
int cxgb3i_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
			 unsigned int count)
{
	struct iscsi_conn *conn = task->conn;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen = count;
	int i, padlen = iscsi_padding(count);
	struct page *pg;

	cxgb3i_tx_debug("task 0x%p,0x%p, offset %u, count %u, skb 0x%p.\n",
			task, task->sc, offset, count, skb);

	skb_put(skb, task->hdr_len);
	tx_skb_setmode(skb, conn->hdrdgst_en, datalen ? conn->datadgst_en : 0);
	if (!count)
		return 0;

	if (task->sc) {
		struct scsi_data_buffer *sdb = scsi_out(task->sc);
		struct scatterlist *sg = NULL;
		int err;

		tdata->offset = offset;
		tdata->count = count;
		err = sgl_seek_offset(sdb->table.sgl, sdb->table.nents,
					tdata->offset, &tdata->sgoffset, &sg);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u/%u.\n",
					sdb->table.nents, tdata->offset,
					sdb->length);
			return err;
		}
		err = sgl_read_to_frags(sg, tdata->sgoffset, tdata->count,
					tdata->frags, MAX_PDU_FRAGS);
		if (err < 0) {
			cxgb3i_log_warn("tpdu, sgl %u, bad offset %u + %u.\n",
					sdb->table.nents, tdata->offset,
					tdata->count);
			return err;
		}
		tdata->nr_frags = err;

		if (tdata->nr_frags > MAX_SKB_FRAGS ||
		    (padlen && tdata->nr_frags == MAX_SKB_FRAGS)) {
			char *dst = skb->data + task->hdr_len;
			skb_frag_t *frag = tdata->frags;

			/* data fits in the skb's headroom */
			for (i = 0; i < tdata->nr_frags; i++, frag++) {
				char *src = kmap_atomic(frag->page,
							KM_SOFTIRQ0);

				memcpy(dst, src+frag->page_offset, frag->size);
				dst += frag->size;
				kunmap_atomic(src, KM_SOFTIRQ0);
			}
			if (padlen) {
				memset(dst, 0, padlen);
				padlen = 0;
			}
			skb_put(skb, count + padlen);
		} else {
			/* data fits in the frag list */
			for (i = 0; i < tdata->nr_frags; i++)
				get_page(tdata->frags[i].page);

			memcpy(skb_shinfo(skb)->frags, tdata->frags,
			       sizeof(skb_frag_t) * tdata->nr_frags);
			skb_shinfo(skb)->nr_frags = tdata->nr_frags;
			skb->len += count;
			skb->data_len += count;
			skb->truesize += count;
		}

	} else {
		pg = virt_to_page(task->data);

		get_page(pg);
		skb_fill_page_desc(skb, 0, pg, offset_in_page(task->data),
				   count);
		skb->len += count;
		skb->data_len += count;
		skb->truesize += count;
	}

	if (padlen) {
		i = skb_shinfo(skb)->nr_frags;
		get_page(pad_page);
		skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, pad_page, 0,
				   padlen);

		skb->data_len += padlen;
		skb->truesize += padlen;
		skb->len += padlen;
	}

	return 0;
}

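/*
 * cxgb3i_conn_xmit_pdu - hand the prepared skb to the offload connection.
 * On -EAGAIN/-ENOBUFS the skb is kept so the pdu can be resent when the
 * tx path reopens; any other error tears down the iscsi connection.
 */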
int cxgb3i_conn_xmit_pdu(struct iscsi_task *task)
{
	struct iscsi_tcp_conn *tcp_conn = task->conn->dd_data;
	struct cxgb3i_conn *cconn = tcp_conn->dd_data;
	struct iscsi_tcp_task *tcp_task = task->dd_data;
	struct cxgb3i_task_data *tdata = tcp_task->dd_data;
	struct sk_buff *skb = tdata->skb;
	unsigned int datalen;
	int err;

	if (!skb)
		return 0;

	datalen = skb->data_len;
	tdata->skb = NULL;
	err = cxgb3i_c3cn_send_pdus(cconn->cep->c3cn, skb);
	if (err > 0) {
		int pdulen = err;

		cxgb3i_tx_debug("task 0x%p, skb 0x%p, len %u/%u, rv %d.\n",
				task, skb, skb->len, skb->data_len, err);

		if (task->conn->hdrdgst_en)
			pdulen += ISCSI_DIGEST_SIZE;
		if (datalen && task->conn->datadgst_en)
			pdulen += ISCSI_DIGEST_SIZE;

		task->conn->txdata_octets += pdulen;
		return 0;
	}

	if (err == -EAGAIN || err == -ENOBUFS) {
		/* reset skb to send when we are called again */
		tdata->skb = skb;
		return err;
	}

	/* log before freeing the skb so the debug print does not touch
	 * freed memory */
	cxgb3i_tx_debug("itt 0x%x, skb 0x%p, len %u/%u, xmit err %d.\n",
			task->itt, skb, skb->len, skb->data_len, err);
	kfree_skb(skb);
	iscsi_conn_printk(KERN_ERR, task->conn, "xmit err %d.\n", err);
	iscsi_conn_failure(task->conn, ISCSI_ERR_XMIT_FAILED);
	return err;
}

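/*
 * module init/cleanup: allocate the shared zeroed pad page and decide
 * whether extra tx headroom is reserved for copying pdu payloads.
 */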
int cxgb3i_pdu_init(void)
{
	if (SKB_TX_HEADROOM > (512 * MAX_SKB_FRAGS))
		skb_extra_headroom = SKB_TX_HEADROOM;
	pad_page = alloc_page(GFP_KERNEL);
	if (!pad_page)
		return -ENOMEM;
	memset(page_address(pad_page), 0, PAGE_SIZE);
	return 0;
}

void cxgb3i_pdu_cleanup(void)
{
	if (pad_page) {
		__free_page(pad_page);
		pad_page = NULL;
	}
}

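/*
 * cxgb3i_conn_pdu_ready - rx callback from the offload connection: drain
 * the receive queue, feed each pdu to libiscsi_tcp and return rx credits
 * for the bytes consumed.
 */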
void cxgb3i_conn_pdu_ready(struct s3_conn *c3cn)
{
	struct sk_buff *skb;
	unsigned int read = 0;
	struct iscsi_conn *conn = c3cn->user_data;
	int err = 0;

	cxgb3i_rx_debug("cn 0x%p.\n", c3cn);

	read_lock(&c3cn->callback_lock);
	if (unlikely(!conn || conn->suspend_rx)) {
		cxgb3i_rx_debug("conn 0x%p, id %d, suspend_rx %lu!\n",
				conn, conn ? conn->id : 0xFF,
				conn ? conn->suspend_rx : 0xFF);
		read_unlock(&c3cn->callback_lock);
		return;
	}
	skb = skb_peek(&c3cn->receive_queue);
	while (!err && skb) {
		__skb_unlink(skb, &c3cn->receive_queue);
		read += skb_rx_pdulen(skb);
		cxgb3i_rx_debug("conn 0x%p, cn 0x%p, rx skb 0x%p, pdulen %u.\n",
				conn, c3cn, skb, skb_rx_pdulen(skb));
		err = cxgb3i_conn_read_pdu_skb(conn, skb);
		__kfree_skb(skb);
		skb = skb_peek(&c3cn->receive_queue);
	}
	read_unlock(&c3cn->callback_lock);
	c3cn->copied_seq += read;
	cxgb3i_c3cn_rx_credits(c3cn, read);
	conn->rxdata_octets += read;

	if (err) {
		cxgb3i_log_info("conn 0x%p rx failed err %d.\n", conn, err);
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	}
}

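/* tx callback from the offload connection: kick the iscsi xmit worker */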
void cxgb3i_conn_tx_open(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn = c3cn->user_data;

	cxgb3i_tx_debug("cn 0x%p.\n", c3cn);
	if (conn) {
		cxgb3i_tx_debug("cn 0x%p, cid %d.\n", c3cn, conn->id);
		iscsi_conn_queue_work(conn);
	}
}

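/*
 * cxgb3i_conn_closing - state callback from the offload connection: fail
 * the iscsi connection if the offload connection is no longer established.
 */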
void cxgb3i_conn_closing(struct s3_conn *c3cn)
{
	struct iscsi_conn *conn;

	read_lock(&c3cn->callback_lock);
	conn = c3cn->user_data;
	if (conn && c3cn->state != C3CN_STATE_ESTABLISHED)
		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
	read_unlock(&c3cn->callback_lock);
}