/*
 * Copyright (c) 2004, 2005, 2006 Voltaire, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include "iscsi_iser.h"

#define ISCSI_ISER_MAX_CONN	8
#define ISER_MAX_RX_CQ_LEN	(ISER_QP_MAX_RECV_DTOS * ISCSI_ISER_MAX_CONN)
#define ISER_MAX_TX_CQ_LEN	(ISER_QP_MAX_REQ_DTOS  * ISCSI_ISER_MAX_CONN)

static void iser_cq_tasklet_fn(unsigned long data);
static void iser_cq_callback(struct ib_cq *cq, void *cq_context);

static void iser_cq_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got cq event %d\n", cause->event);
}

static void iser_qp_event_callback(struct ib_event *cause, void *context)
{
	iser_err("got qp event %d\n", cause->event);
}

static void iser_event_handler(struct ib_event_handler *handler,
			       struct ib_event *event)
{
	iser_err("async event %d on device %s port %d\n", event->event,
		 event->device->name, event->element.port_num);
}

/**
 * iser_create_device_ib_res - creates Protection Domain (PD), Completion
 * Queue (CQ), DMA Memory Region (DMA MR) with the device associated with
 * the adaptor.
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_device_ib_res(struct iser_device *device)
{
	device->pd = ib_alloc_pd(device->ib_device);
	if (IS_ERR(device->pd))
		goto pd_err;

	device->rx_cq = ib_create_cq(device->ib_device,
				     iser_cq_callback,
				     iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_RX_CQ_LEN, 0);
	if (IS_ERR(device->rx_cq))
		goto rx_cq_err;

	device->tx_cq = ib_create_cq(device->ib_device,
				     NULL, iser_cq_event_callback,
				     (void *)device,
				     ISER_MAX_TX_CQ_LEN, 0);
	if (IS_ERR(device->tx_cq))
		goto tx_cq_err;

	if (ib_req_notify_cq(device->rx_cq, IB_CQ_NEXT_COMP))
		goto cq_arm_err;

	tasklet_init(&device->cq_tasklet,
		     iser_cq_tasklet_fn,
		     (unsigned long)device);

	device->mr = ib_get_dma_mr(device->pd, IB_ACCESS_LOCAL_WRITE |
				   IB_ACCESS_REMOTE_WRITE |
				   IB_ACCESS_REMOTE_READ);
	if (IS_ERR(device->mr))
		goto dma_mr_err;

	INIT_IB_EVENT_HANDLER(&device->event_handler, device->ib_device,
			      iser_event_handler);
	if (ib_register_event_handler(&device->event_handler))
		goto handler_err;

	return 0;

handler_err:
	ib_dereg_mr(device->mr);
dma_mr_err:
	tasklet_kill(&device->cq_tasklet);
cq_arm_err:
	ib_destroy_cq(device->tx_cq);
tx_cq_err:
	ib_destroy_cq(device->rx_cq);
rx_cq_err:
	ib_dealloc_pd(device->pd);
pd_err:
	iser_err("failed to allocate an IB resource\n");
	return -1;
}

/**
 * iser_free_device_ib_res - destroy/dealloc/dereg the DMA MR,
 * CQ and PD created with the device associated with the adaptor.
 */
static void iser_free_device_ib_res(struct iser_device *device)
{
	BUG_ON(device->mr == NULL);

	tasklet_kill(&device->cq_tasklet);
	(void)ib_unregister_event_handler(&device->event_handler);
	(void)ib_dereg_mr(device->mr);
	(void)ib_destroy_cq(device->tx_cq);
	(void)ib_destroy_cq(device->rx_cq);
	(void)ib_dealloc_pd(device->pd);

	device->mr = NULL;
	device->tx_cq = NULL;
	device->rx_cq = NULL;
	device->pd = NULL;
}

/**
 * iser_create_ib_conn_res - Creates FMR pool and Queue-Pair (QP)
 *
 * returns 0 on success, -1 on failure
 */
static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{
	struct iser_device	*device;
	struct ib_qp_init_attr	init_attr;
	int			ret = -ENOMEM;
	struct ib_fmr_pool_param params;

	BUG_ON(ib_conn->device == NULL);

	device = ib_conn->device;

	ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL);
	if (!ib_conn->login_buf) {
		ret = -ENOMEM;
		goto alloc_err;
	}

	ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device,
					       (void *)ib_conn->login_buf,
					       ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);

	ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
				    (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE + 1)),
				    GFP_KERNEL);
	if (!ib_conn->page_vec) {
		ret = -ENOMEM;
		goto alloc_err;
	}
	ib_conn->page_vec->pages = (u64 *)(ib_conn->page_vec + 1);

	params.page_shift        = SHIFT_4K;
	/* when the first/last SG element are not start/end *
	 * page aligned, the map would be of N+1 pages */
	params.max_pages_per_fmr = ISCSI_ISER_SG_TABLESIZE + 1;
	/* make the pool size twice the max number of SCSI commands *
	 * the ML is expected to queue, watermark for unmap at 50% */
	params.pool_size	 = ISCSI_DEF_XMIT_CMDS_MAX * 2;
	params.dirty_watermark	 = ISCSI_DEF_XMIT_CMDS_MAX;
	params.cache		 = 0;
	params.flush_function	 = NULL;
	params.access		 = (IB_ACCESS_LOCAL_WRITE  |
				    IB_ACCESS_REMOTE_WRITE |
				    IB_ACCESS_REMOTE_READ);

	ib_conn->fmr_pool = ib_create_fmr_pool(device->pd, &params);
	if (IS_ERR(ib_conn->fmr_pool)) {
		ret = PTR_ERR(ib_conn->fmr_pool);
		goto fmr_pool_err;
	}

	memset(&init_attr, 0, sizeof init_attr);

	init_attr.event_handler = iser_qp_event_callback;
	init_attr.qp_context	= (void *)ib_conn;
	init_attr.send_cq	= device->tx_cq;
	init_attr.recv_cq	= device->rx_cq;
	init_attr.cap.max_send_wr  = ISER_QP_MAX_REQ_DTOS;
	init_attr.cap.max_recv_wr  = ISER_QP_MAX_RECV_DTOS;
	init_attr.cap.max_send_sge = 2;
	init_attr.cap.max_recv_sge = 1;
	init_attr.sq_sig_type	= IB_SIGNAL_REQ_WR;
	init_attr.qp_type	= IB_QPT_RC;

	ret = rdma_create_qp(ib_conn->cma_id, device->pd, &init_attr);
	if (ret)
		goto qp_err;

	ib_conn->qp = ib_conn->cma_id->qp;
	iser_err("setting conn %p cma_id %p: fmr_pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->cma_id->qp);
	return ret;

qp_err:
	(void)ib_destroy_fmr_pool(ib_conn->fmr_pool);
fmr_pool_err:
	kfree(ib_conn->page_vec);
	kfree(ib_conn->login_buf);
alloc_err:
	iser_err("unable to alloc mem or create resource, err %d\n", ret);
	return ret;
}

/**
 * releases the FMR pool, QP and CMA ID objects, returns 0 on success,
 * -1 on failure
 */
static int iser_free_ib_conn_res(struct iser_conn *ib_conn)
{
	BUG_ON(ib_conn == NULL);

	iser_err("freeing conn %p cma_id %p fmr pool %p qp %p\n",
		 ib_conn, ib_conn->cma_id,
		 ib_conn->fmr_pool, ib_conn->qp);

	/* qp is created only once both addr & route are resolved */
	if (ib_conn->fmr_pool != NULL)
		ib_destroy_fmr_pool(ib_conn->fmr_pool);

	if (ib_conn->qp != NULL)
		rdma_destroy_qp(ib_conn->cma_id);

	if (ib_conn->cma_id != NULL)
		rdma_destroy_id(ib_conn->cma_id);

	ib_conn->fmr_pool = NULL;
	ib_conn->qp	  = NULL;
	ib_conn->cma_id   = NULL;
	kfree(ib_conn->page_vec);

	return 0;
}

/**
 * based on the resolved device node GUID, see if there is already an
 * allocated device for this node GUID. If there is no such device, create one.
 */
struct iser_device *iser_device_find_by_ib_device(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;

	mutex_lock(&ig.device_list_mutex);
	list_for_each_entry(device, &ig.device_list, ig_list)
		/* find if there's a match using the node GUID */
		if (device->ib_device->node_guid == cma_id->device->node_guid)
			goto inc_refcnt;

	device = kzalloc(sizeof *device, GFP_KERNEL);
	if (device == NULL)
		goto out;

	/* assign the IB device to the new iser device */
	device->ib_device = cma_id->device;
	/* init the device and link it into ig device list */
	if (iser_create_device_ib_res(device)) {
		kfree(device);
		device = NULL;
		goto out;
	}
	list_add(&device->ig_list, &ig.device_list);
inc_refcnt:
	device->refcount++;
out:
	mutex_unlock(&ig.device_list_mutex);
	return device;
}

/* if there's no demand for this device, release it */
static void iser_device_try_release(struct iser_device *device)
{
	mutex_lock(&ig.device_list_mutex);
	device->refcount--;
	iser_err("device %p refcount %d\n", device, device->refcount);
	if (!device->refcount) {
		iser_free_device_ib_res(device);
		list_del(&device->ig_list);
		kfree(device);
	}
	mutex_unlock(&ig.device_list_mutex);
}

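/* atomically test the connection state: if it equals 'comp' move it to
 * 'exch'; returns non-zero when the exchange actually took place */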
static int iser_conn_state_comp_exch(struct iser_conn *ib_conn,
				     enum iser_ib_conn_state comp,
				     enum iser_ib_conn_state exch)
{
	int ret;

	spin_lock_bh(&ib_conn->lock);
	if ((ret = (ib_conn->state == comp)))
		ib_conn->state = exch;
	spin_unlock_bh(&ib_conn->lock);
	return ret;
}

/**
 * Frees all conn objects and deallocs conn descriptor
 */
static void iser_conn_release(struct iser_conn *ib_conn)
{
	struct iser_device *device = ib_conn->device;

	BUG_ON(ib_conn->state != ISER_CONN_DOWN);

	mutex_lock(&ig.connlist_mutex);
	list_del(&ib_conn->conn_list);
	mutex_unlock(&ig.connlist_mutex);
	iser_free_rx_descriptors(ib_conn);
	iser_free_ib_conn_res(ib_conn);
	ib_conn->device = NULL;
	/* on EVENT_ADDR_ERROR there's no device yet for this conn */
	if (device != NULL)
		iser_device_try_release(device);
	if (ib_conn->iser_conn)
		ib_conn->iser_conn->ib_conn = NULL;
	iscsi_destroy_endpoint(ib_conn->ep);
}

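/* take/drop a reference on the connection; the last put releases it */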
void iser_conn_get(struct iser_conn *ib_conn)
{
	atomic_inc(&ib_conn->refcount);
}

void iser_conn_put(struct iser_conn *ib_conn)
{
	if (atomic_dec_and_test(&ib_conn->refcount))
		iser_conn_release(ib_conn);
}

/**
 * triggers start of the disconnect procedures and waits for them to be done
 */
void iser_conn_terminate(struct iser_conn *ib_conn)
{
	int err = 0;

	/* change the ib conn state only if the conn is UP, however always call
	 * rdma_disconnect since this is the only way to cause the CMA to change
	 * the QP state to ERROR
	 */
	iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP, ISER_CONN_TERMINATING);
	err = rdma_disconnect(ib_conn->cma_id);
	if (err)
		iser_err("Failed to disconnect, conn: 0x%p err %d\n",
			 ib_conn, err);

	wait_event_interruptible(ib_conn->wait,
				 ib_conn->state == ISER_CONN_DOWN);

	iser_conn_put(ib_conn);
}

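/* move the connection to DOWN and wake up anyone waiting on its state */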
static void iser_connect_error(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;

	ib_conn->state = ISER_CONN_DOWN;
	wake_up_interruptible(&ib_conn->wait);
}

static void iser_addr_handler(struct rdma_cm_id *cma_id)
{
	struct iser_device *device;
	struct iser_conn   *ib_conn;
	int    ret;

	device = iser_device_find_by_ib_device(cma_id);
	if (!device) {
		iser_err("device lookup/creation failed\n");
		iser_connect_error(cma_id);
		return;
	}

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->device = device;

	ret = rdma_resolve_route(cma_id, 1000);
	if (ret) {
		iser_err("resolve route failed: %d\n", ret);
		iser_connect_error(cma_id);
	}
}

static void iser_route_handler(struct rdma_cm_id *cma_id)
{
	struct rdma_conn_param conn_param;
	int    ret;

	ret = iser_create_ib_conn_res((struct iser_conn *)cma_id->context);
	if (ret)
		goto failure;

	memset(&conn_param, 0, sizeof conn_param);
	conn_param.responder_resources = 4;
	conn_param.initiator_depth     = 1;
	conn_param.retry_count	       = 7;
	conn_param.rnr_retry_count     = 6;

	ret = rdma_connect(cma_id, &conn_param);
	if (ret) {
		iser_err("failure connecting: %d\n", ret);
		goto failure;
	}
	return;
failure:
	iser_connect_error(cma_id);
}

static void iser_connected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->state = ISER_CONN_UP;
	wake_up_interruptible(&ib_conn->wait);
}

static void iser_disconnected_handler(struct rdma_cm_id *cma_id)
{
	struct iser_conn *ib_conn;

	ib_conn = (struct iser_conn *)cma_id->context;
	ib_conn->disc_evt_flag = 1;

	/* getting here when the state is UP means that the conn is being *
	 * terminated asynchronously from the iSCSI layer's perspective.  */
	if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
				      ISER_CONN_TERMINATING))
		iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
				   ISCSI_ERR_CONN_FAILED);

	/* Complete the termination process if no posts are pending */
	if (ib_conn->post_recv_buf_count == 0 &&
	    (atomic_read(&ib_conn->post_send_buf_count) == 0)) {
		ib_conn->state = ISER_CONN_DOWN;
		wake_up_interruptible(&ib_conn->wait);
	}
}

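/* single entry point for RDMA CM events - dispatches each event to the
 * appropriate handler above */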
static int iser_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
{
	int ret = 0;

	iser_err("event %d conn %p id %p\n",
		 event->event, cma_id->context, cma_id);

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		iser_addr_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		iser_route_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		iser_connected_handler(cma_id);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
	case RDMA_CM_EVENT_ROUTE_ERROR:
	case RDMA_CM_EVENT_CONNECT_ERROR:
	case RDMA_CM_EVENT_UNREACHABLE:
	case RDMA_CM_EVENT_REJECTED:
		iser_err("event: %d, error: %d\n", event->event, event->status);
		iser_connect_error(cma_id);
		break;
	case RDMA_CM_EVENT_DISCONNECTED:
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
	case RDMA_CM_EVENT_ADDR_CHANGE:
		iser_disconnected_handler(cma_id);
		break;
	default:
		iser_err("Unexpected RDMA CM event (%d)\n", event->event);
		break;
	}
	return ret;
}

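/* set the initial state and init the wait queue, counters and lock of a
 * freshly allocated connection object */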
void iser_conn_init(struct iser_conn *ib_conn)
{
	ib_conn->state = ISER_CONN_INIT;
	init_waitqueue_head(&ib_conn->wait);
	ib_conn->post_recv_buf_count = 0;
	atomic_set(&ib_conn->post_send_buf_count, 0);
	atomic_set(&ib_conn->refcount, 1);
	INIT_LIST_HEAD(&ib_conn->conn_list);
	spin_lock_init(&ib_conn->lock);
}

/**
 * starts the process of connecting to the target
 * sleeps until the connection is established or rejected
 */
int iser_connect(struct iser_conn   *ib_conn,
		 struct sockaddr_in *src_addr,
		 struct sockaddr_in *dst_addr,
		 int                 non_blocking)
{
	struct sockaddr *src, *dst;
	int err = 0;

	sprintf(ib_conn->name, "%pI4:%d",
		&dst_addr->sin_addr.s_addr, dst_addr->sin_port);

	/* the device is known only --after-- address resolution */
	ib_conn->device = NULL;

	iser_err("connecting to: %pI4, port 0x%x\n",
		 &dst_addr->sin_addr, dst_addr->sin_port);

	ib_conn->state = ISER_CONN_PENDING;

	ib_conn->cma_id = rdma_create_id(iser_cma_handler,
					 (void *)ib_conn,
					 RDMA_PS_TCP);
	if (IS_ERR(ib_conn->cma_id)) {
		err = PTR_ERR(ib_conn->cma_id);
		iser_err("rdma_create_id failed: %d\n", err);
		goto id_failure;
	}

	src = (struct sockaddr *)src_addr;
	dst = (struct sockaddr *)dst_addr;
	err = rdma_resolve_addr(ib_conn->cma_id, src, dst, 1000);
	if (err) {
		iser_err("rdma_resolve_addr failed: %d\n", err);
		goto addr_failure;
	}

	if (!non_blocking) {
		wait_event_interruptible(ib_conn->wait,
					 ib_conn->state != ISER_CONN_PENDING);

		if (ib_conn->state != ISER_CONN_UP) {
			err = -EIO;
			goto connect_failure;
		}
	}

	mutex_lock(&ig.connlist_mutex);
	list_add(&ib_conn->conn_list, &ig.connlist);
	mutex_unlock(&ig.connlist_mutex);
	return 0;

id_failure:
	ib_conn->cma_id = NULL;
addr_failure:
	ib_conn->state = ISER_CONN_DOWN;
connect_failure:
	iser_conn_release(ib_conn);
	return err;
}

/**
 * iser_reg_page_vec - Register physical memory
 *
 * returns: 0 on success, errno code on failure
 */
int iser_reg_page_vec(struct iser_conn     *ib_conn,
		      struct iser_page_vec *page_vec,
		      struct iser_mem_reg  *mem_reg)
{
	struct ib_pool_fmr *mem;
	u64		   io_addr;
	u64		   *page_list;
	int		   status;

	page_list = page_vec->pages;
	io_addr	  = page_list[0];

	mem = ib_fmr_pool_map_phys(ib_conn->fmr_pool,
				   page_list,
				   page_vec->length,
				   io_addr);

	if (IS_ERR(mem)) {
		status = (int)PTR_ERR(mem);
		iser_err("ib_fmr_pool_map_phys failed: %d\n", status);
		return status;
	}

	mem_reg->lkey  = mem->fmr->lkey;
	mem_reg->rkey  = mem->fmr->rkey;
	mem_reg->len   = page_vec->length * SIZE_4K;
	mem_reg->va    = io_addr;
	mem_reg->mem_h = (void *)mem;

	mem_reg->va   += page_vec->offset;
	mem_reg->len   = page_vec->data_size;

	iser_dbg("PHYSICAL Mem.register, [PHYS p_array: 0x%p, sz: %d, "
		 "entry[0]: (0x%08lx,%ld)] -> "
		 "[lkey: 0x%08X mem_h: 0x%p va: 0x%08lX sz: %ld]\n",
		 page_vec, page_vec->length,
		 (unsigned long)page_vec->pages[0],
		 (unsigned long)page_vec->data_size,
		 (unsigned int)mem_reg->lkey, mem_reg->mem_h,
		 (unsigned long)mem_reg->va, (unsigned long)mem_reg->len);
	return 0;
}

/**
 * Unregister (previously registered) memory.
 */
void iser_unreg_mem(struct iser_mem_reg *reg)
{
	int ret;

	iser_dbg("PHYSICAL Mem.Unregister mem_h %p\n", reg->mem_h);
	ret = ib_fmr_pool_unmap((struct ib_pool_fmr *)reg->mem_h);
	if (ret)
		iser_err("ib_fmr_pool_unmap failed %d\n", ret);
	reg->mem_h = NULL;
}

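/* post a single receive buffer used for the iSCSI login phase */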
int iser_post_recvl(struct iser_conn *ib_conn)
{
	struct ib_recv_wr rx_wr, *rx_wr_failed;
	struct ib_sge	  sge;
	int ib_ret;

	sge.addr   = ib_conn->login_dma;
	sge.length = ISER_RX_LOGIN_SIZE;
	sge.lkey   = ib_conn->device->mr->lkey;

	rx_wr.wr_id   = (unsigned long)ib_conn->login_buf;
	rx_wr.sg_list = &sge;
	rx_wr.num_sge = 1;
	rx_wr.next    = NULL;

	ib_conn->post_recv_buf_count++;
	ib_ret = ib_post_recv(ib_conn->qp, &rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count--;
	}
	return ib_ret;
}

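/* post 'count' receive buffers, taken in order from the rx descriptors ring */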
int iser_post_recvm(struct iser_conn *ib_conn, int count)
{
	struct ib_recv_wr *rx_wr, *rx_wr_failed;
	int i, ib_ret;
	unsigned int my_rx_head = ib_conn->rx_desc_head;
	struct iser_rx_desc *rx_desc;

	for (rx_wr = ib_conn->rx_wr, i = 0; i < count; i++, rx_wr++) {
		rx_desc		= &ib_conn->rx_descs[my_rx_head];
		rx_wr->wr_id	= (unsigned long)rx_desc;
		rx_wr->sg_list	= &rx_desc->rx_sg;
		rx_wr->num_sge	= 1;
		rx_wr->next	= rx_wr + 1;
		my_rx_head = (my_rx_head + 1) & (ISER_QP_MAX_RECV_DTOS - 1);
	}

	rx_wr--;
	rx_wr->next = NULL; /* mark end of work requests list */

	ib_conn->post_recv_buf_count += count;
	ib_ret = ib_post_recv(ib_conn->qp, ib_conn->rx_wr, &rx_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_recv failed ret=%d\n", ib_ret);
		ib_conn->post_recv_buf_count -= count;
	} else
		ib_conn->rx_desc_head = my_rx_head;
	return ib_ret;
}

/**
 * iser_post_send - Initiate a Send DTO operation
 *
 * returns 0 on success, -1 on failure
 */
int iser_post_send(struct iser_conn *ib_conn, struct iser_tx_desc *tx_desc)
{
	int		  ib_ret;
	struct ib_send_wr send_wr, *send_wr_failed;

	ib_dma_sync_single_for_device(ib_conn->device->ib_device,
		tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);

	send_wr.next	   = NULL;
	send_wr.wr_id	   = (unsigned long)tx_desc;
	send_wr.sg_list	   = tx_desc->tx_sg;
	send_wr.num_sge	   = tx_desc->num_sge;
	send_wr.opcode	   = IB_WR_SEND;
	send_wr.send_flags = IB_SEND_SIGNALED;

	atomic_inc(&ib_conn->post_send_buf_count);

	ib_ret = ib_post_send(ib_conn->qp, &send_wr, &send_wr_failed);
	if (ib_ret) {
		iser_err("ib_post_send failed, ret:%d\n", ib_ret);
		atomic_dec(&ib_conn->post_send_buf_count);
	}
	return ib_ret;
}

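/* handle a completion with error: free a DataOut descriptor if one was
 * attached, and once no posts are outstanding drive the teardown forward */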
static void iser_handle_comp_error(struct iser_tx_desc *desc,
				   struct iser_conn *ib_conn)
{
	if (desc && desc->type == ISCSI_TX_DATAOUT)
		kmem_cache_free(ig.desc_cache, desc);

	if (ib_conn->post_recv_buf_count == 0 &&
	    atomic_read(&ib_conn->post_send_buf_count) == 0) {
		/* getting here when the state is UP means that the conn is *
		 * being terminated asynchronously from the iSCSI layer's   *
		 * perspective.						     */
		if (iser_conn_state_comp_exch(ib_conn, ISER_CONN_UP,
					      ISER_CONN_TERMINATING))
			iscsi_conn_failure(ib_conn->iser_conn->iscsi_conn,
					   ISCSI_ERR_CONN_FAILED);

		/* complete the termination process if disconnect event was delivered *
		 * note there are no more uncompleted posts to the QP		       */
		if (ib_conn->disc_evt_flag) {
			ib_conn->state = ISER_CONN_DOWN;
			wake_up_interruptible(&ib_conn->wait);
		}
	}
}

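/* poll the send CQ until it is empty; returns the number of completions drained */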
static int iser_drain_tx_cq(struct iser_device *device)
{
	struct ib_cq  *cq = device->tx_cq;
	struct ib_wc  wc;
	struct iser_tx_desc *tx_desc;
	struct iser_conn *ib_conn;
	int completed_tx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		tx_desc	= (struct iser_tx_desc *) (unsigned long) wc.wr_id;
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_SEND)
				iser_snd_completion(tx_desc, ib_conn);
			else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_SEND, wc.opcode);
		} else {
			iser_err("tx id %llx status %d vend_err %x\n",
				 wc.wr_id, wc.status, wc.vendor_err);
			atomic_dec(&ib_conn->post_send_buf_count);
			iser_handle_comp_error(tx_desc, ib_conn);
		}
		completed_tx++;
	}
	return completed_tx;
}

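/* tasklet context: drain the receive CQ (and periodically the send CQ),
 * then re-arm the receive CQ for the next interrupt */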
static void iser_cq_tasklet_fn(unsigned long data)
{
	struct iser_device  *device = (struct iser_device *)data;
	struct ib_cq	    *cq = device->rx_cq;
	struct ib_wc	     wc;
	struct iser_rx_desc *desc;
	unsigned long	     xfer_len;
	struct iser_conn    *ib_conn;
	int completed_tx, completed_rx;
	completed_tx = completed_rx = 0;

	while (ib_poll_cq(cq, 1, &wc) == 1) {
		desc	 = (struct iser_rx_desc *) (unsigned long) wc.wr_id;
		BUG_ON(desc == NULL);
		ib_conn = wc.qp->qp_context;
		if (wc.status == IB_WC_SUCCESS) {
			if (wc.opcode == IB_WC_RECV) {
				xfer_len = (unsigned long)wc.byte_len;
				iser_rcv_completion(desc, xfer_len, ib_conn);
			} else
				iser_err("expected opcode %d got %d\n",
					 IB_WC_RECV, wc.opcode);
		} else {
			if (wc.status != IB_WC_WR_FLUSH_ERR)
				iser_err("rx id %llx status %d vend_err %x\n",
					 wc.wr_id, wc.status, wc.vendor_err);
			ib_conn->post_recv_buf_count--;
			iser_handle_comp_error(NULL, ib_conn);
		}
		completed_rx++;
		if (!(completed_rx & 63))
			completed_tx += iser_drain_tx_cq(device);
	}
	/* #warning "it is assumed here that arming CQ only once its empty" *
	 * " would not cause interrupts to be missed" */
	ib_req_notify_cq(cq, IB_CQ_NEXT_COMP);

	completed_tx += iser_drain_tx_cq(device);
	iser_dbg("got %d rx %d tx completions\n", completed_rx, completed_tx);
}

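/* CQ completion interrupt handler - defers the actual polling to the tasklet */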
static void iser_cq_callback(struct ib_cq *cq, void *cq_context)
{
	struct iser_device *device = (struct iser_device *)cq_context;

	tasklet_schedule(&device->cq_tasklet);
}