2 * Copyright (c) 2005 Topspin Communications. All rights reserved.
3 * Copyright (c) 2005, 2006 Cisco Systems. All rights reserved.
4 * Copyright (c) 2005 PathScale, Inc. All rights reserved.
5 * Copyright (c) 2006 Mellanox Technologies. All rights reserved.
7 * This software is available to you under a choice of one of two
8 * licenses. You may choose to be licensed under the terms of the GNU
9 * General Public License (GPL) Version 2, available from the file
10 * COPYING in the main directory of this source tree, or the
11 * OpenIB.org BSD license below:
13 * Redistribution and use in source and binary forms, with or
14 * without modification, are permitted provided that the following
17 * - Redistributions of source code must retain the above
18 * copyright notice, this list of conditions and the following
21 * - Redistributions in binary form must reproduce the above
22 * copyright notice, this list of conditions and the following
23 * disclaimer in the documentation and/or other materials
24 * provided with the distribution.
26 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
27 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
28 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
29 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
30 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
31 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
32 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
35 * $Id: uverbs_cmd.c 2708 2005-06-24 17:27:21Z roland $
38 #include <linux/file.h>
41 #include <asm/uaccess.h>
/*
 * INIT_UDATA - fill in an ib_udata so a device driver can consume the
 * driver-private tail of a userspace command (inbuf/inlen) and write
 * its driver-private response (outbuf/outlen).
 * NOTE(review): the usual do { ... } while (0) wrapper lines appear to
 * be elided in this excerpt; macro tokens below preserved verbatim.
 */
45 #define INIT_UDATA(udata, ibuf, obuf, ilen, olen) \
47 (udata)->inbuf = (void __user *) (ibuf); \
48 (udata)->outbuf = (void __user *) (obuf); \
49 (udata)->inlen = (ilen); \
50 (udata)->outlen = (olen); \
/*
 * idr_add_uobj() - insert @uobj into @idr, mapping the kernel object
 * @obj to a new user-visible handle stored in uobj->id.
 * Uses the legacy two-step idr API: idr_pre_get() preallocates with
 * GFP_KERNEL before idr_get_new() performs the insertion.
 * NOTE(review): the -EAGAIN retry / return lines are elided in this
 * excerpt — confirm against the full source.
 */
53 static int idr_add_uobj(struct idr *idr, void *obj, struct ib_uobject *uobj)
58 if (!idr_pre_get(idr, GFP_KERNEL))
61 ret = idr_get_new(idr, uobj, &uobj->id);
69 ssize_t ib_uverbs_get_context(struct ib_uverbs_file *file,
70 const char __user *buf,
71 int in_len, int out_len)
73 struct ib_uverbs_get_context cmd;
74 struct ib_uverbs_get_context_resp resp;
75 struct ib_udata udata;
76 struct ib_device *ibdev = file->device->ib_dev;
77 struct ib_ucontext *ucontext;
81 if (out_len < sizeof resp)
84 if (copy_from_user(&cmd, buf, sizeof cmd))
87 mutex_lock(&file->mutex);
94 INIT_UDATA(&udata, buf + sizeof cmd,
95 (unsigned long) cmd.response + sizeof resp,
96 in_len - sizeof cmd, out_len - sizeof resp);
98 ucontext = ibdev->alloc_ucontext(ibdev, &udata);
99 if (IS_ERR(ucontext)) {
100 ret = PTR_ERR(file->ucontext);
104 ucontext->device = ibdev;
105 INIT_LIST_HEAD(&ucontext->pd_list);
106 INIT_LIST_HEAD(&ucontext->mr_list);
107 INIT_LIST_HEAD(&ucontext->mw_list);
108 INIT_LIST_HEAD(&ucontext->cq_list);
109 INIT_LIST_HEAD(&ucontext->qp_list);
110 INIT_LIST_HEAD(&ucontext->srq_list);
111 INIT_LIST_HEAD(&ucontext->ah_list);
113 resp.num_comp_vectors = file->device->num_comp_vectors;
115 filp = ib_uverbs_alloc_event_file(file, 1, &resp.async_fd);
121 if (copy_to_user((void __user *) (unsigned long) cmd.response,
122 &resp, sizeof resp)) {
127 file->async_file = filp->private_data;
129 INIT_IB_EVENT_HANDLER(&file->event_handler, file->device->ib_dev,
130 ib_uverbs_event_handler);
131 ret = ib_register_event_handler(&file->event_handler);
135 kref_get(&file->async_file->ref);
136 kref_get(&file->ref);
137 file->ucontext = ucontext;
139 fd_install(resp.async_fd, filp);
141 mutex_unlock(&file->mutex);
146 put_unused_fd(resp.async_fd);
150 ibdev->dealloc_ucontext(ucontext);
153 mutex_unlock(&file->mutex);
/*
 * ib_uverbs_query_device() - handle the QUERY_DEVICE command: query
 * the device attributes via ib_query_device() and marshal them field
 * by field into the fixed-layout userspace response structure.
 * Returns in_len on success or a negative errno (return statements are
 * elided in this excerpt).
 */
157 ssize_t ib_uverbs_query_device(struct ib_uverbs_file *file,
158 const char __user *buf,
159 int in_len, int out_len)
161 struct ib_uverbs_query_device cmd;
162 struct ib_uverbs_query_device_resp resp;
163 struct ib_device_attr attr;
166 if (out_len < sizeof resp)
169 if (copy_from_user(&cmd, buf, sizeof cmd))
172 ret = ib_query_device(file->device->ib_dev, &attr);
/* Zero the response first so reserved/padding bytes don't leak kernel memory. */
176 memset(&resp, 0, sizeof resp)
178 resp.fw_ver = attr.fw_ver;
/* node_guid comes from the device itself, not from the queried attributes. */
179 resp.node_guid = file->device->ib_dev->node_guid;
180 resp.sys_image_guid = attr.sys_image_guid;
181 resp.max_mr_size = attr.max_mr_size;
182 resp.page_size_cap = attr.page_size_cap;
183 resp.vendor_id = attr.vendor_id;
184 resp.vendor_part_id = attr.vendor_part_id;
185 resp.hw_ver = attr.hw_ver;
186 resp.max_qp = attr.max_qp;
187 resp.max_qp_wr = attr.max_qp_wr;
188 resp.device_cap_flags = attr.device_cap_flags;
189 resp.max_sge = attr.max_sge;
190 resp.max_sge_rd = attr.max_sge_rd;
191 resp.max_cq = attr.max_cq;
192 resp.max_cqe = attr.max_cqe;
193 resp.max_mr = attr.max_mr;
194 resp.max_pd = attr.max_pd;
195 resp.max_qp_rd_atom = attr.max_qp_rd_atom;
196 resp.max_ee_rd_atom = attr.max_ee_rd_atom;
197 resp.max_res_rd_atom = attr.max_res_rd_atom;
198 resp.max_qp_init_rd_atom = attr.max_qp_init_rd_atom;
199 resp.max_ee_init_rd_atom = attr.max_ee_init_rd_atom;
200 resp.atomic_cap = attr.atomic_cap;
201 resp.max_ee = attr.max_ee;
202 resp.max_rdd = attr.max_rdd;
203 resp.max_mw = attr.max_mw;
204 resp.max_raw_ipv6_qp = attr.max_raw_ipv6_qp;
205 resp.max_raw_ethy_qp = attr.max_raw_ethy_qp;
206 resp.max_mcast_grp = attr.max_mcast_grp;
207 resp.max_mcast_qp_attach = attr.max_mcast_qp_attach;
208 resp.max_total_mcast_qp_attach = attr.max_total_mcast_qp_attach;
209 resp.max_ah = attr.max_ah;
210 resp.max_fmr = attr.max_fmr;
211 resp.max_map_per_fmr = attr.max_map_per_fmr;
212 resp.max_srq = attr.max_srq;
213 resp.max_srq_wr = attr.max_srq_wr;
214 resp.max_srq_sge = attr.max_srq_sge;
215 resp.max_pkeys = attr.max_pkeys;
216 resp.local_ca_ack_delay = attr.local_ca_ack_delay;
217 resp.phys_port_cnt = file->device->ib_dev->phys_port_cnt;
219 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * ib_uverbs_query_port() - handle the QUERY_PORT command: query the
 * attributes of port cmd.port_num via ib_query_port() and copy them
 * into the userspace response.
 * NOTE(review): some response fields (embedded line numbers jump from
 * 255 to 257 and 257 to 259) are elided in this excerpt.
 */
226 ssize_t ib_uverbs_query_port(struct ib_uverbs_file *file,
227 const char __user *buf,
228 int in_len, int out_len)
230 struct ib_uverbs_query_port cmd;
231 struct ib_uverbs_query_port_resp resp;
232 struct ib_port_attr attr;
235 if (out_len < sizeof resp)
238 if (copy_from_user(&cmd, buf, sizeof cmd))
241 ret = ib_query_port(file->device->ib_dev, cmd.port_num, &attr);
/* Zero the response so padding bytes don't leak kernel memory. */
245 memset(&resp, 0, sizeof resp);
247 resp.state = attr.state;
248 resp.max_mtu = attr.max_mtu;
249 resp.active_mtu = attr.active_mtu;
250 resp.gid_tbl_len = attr.gid_tbl_len;
251 resp.port_cap_flags = attr.port_cap_flags;
252 resp.max_msg_sz = attr.max_msg_sz;
253 resp.bad_pkey_cntr = attr.bad_pkey_cntr;
254 resp.qkey_viol_cntr = attr.qkey_viol_cntr;
255 resp.pkey_tbl_len = attr.pkey_tbl_len;
257 resp.sm_lid = attr.sm_lid;
259 resp.max_vl_num = attr.max_vl_num;
260 resp.sm_sl = attr.sm_sl;
261 resp.subnet_timeout = attr.subnet_timeout;
262 resp.init_type_reply = attr.init_type_reply;
263 resp.active_width = attr.active_width;
264 resp.active_speed = attr.active_speed;
265 resp.phys_state = attr.phys_state;
267 if (copy_to_user((void __user *) (unsigned long) cmd.response,
/*
 * ib_uverbs_alloc_pd() - handle the ALLOC_PD command: allocate a
 * protection domain through the device's alloc_pd verb, assign it a
 * user handle in ib_uverbs_pd_idr, and add its uobject to the
 * per-context pd_list.  The idr handle is removed again on the error
 * path (lines elided in this excerpt).
 */
274 ssize_t ib_uverbs_alloc_pd(struct ib_uverbs_file *file,
275 const char __user *buf,
276 int in_len, int out_len)
278 struct ib_uverbs_alloc_pd cmd;
279 struct ib_uverbs_alloc_pd_resp resp;
280 struct ib_udata udata;
281 struct ib_uobject *uobj;
285 if (out_len < sizeof resp)
288 if (copy_from_user(&cmd, buf, sizeof cmd))
291 INIT_UDATA(&udata, buf + sizeof cmd,
292 (unsigned long) cmd.response + sizeof resp,
293 in_len - sizeof cmd, out_len - sizeof resp);
295 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
299 uobj->context = file->ucontext;
/* Driver verb allocates the PD; udata carries any driver-private request/response. */
301 pd = file->device->ib_dev->alloc_pd(file->device->ib_dev,
302 file->ucontext, &udata);
308 pd->device = file->device->ib_dev;
310 atomic_set(&pd->usecnt, 0);
/* Handle-table updates are serialized by the global uverbs idr mutex. */
312 mutex_lock(&ib_uverbs_idr_mutex);
314 ret = idr_add_uobj(&ib_uverbs_pd_idr, pd, uobj);
318 memset(&resp, 0, sizeof resp);
319 resp.pd_handle = uobj->id;
321 if (copy_to_user((void __user *) (unsigned long) cmd.response,
322 &resp, sizeof resp)) {
/* file->mutex protects the per-context object lists. */
327 mutex_lock(&file->mutex);
328 list_add_tail(&uobj->list, &file->ucontext->pd_list);
329 mutex_unlock(&file->mutex);
331 mutex_unlock(&ib_uverbs_idr_mutex);
/* Error path: undo the idr insertion before freeing. */
336 idr_remove(&ib_uverbs_pd_idr, uobj->id);
339 mutex_unlock(&ib_uverbs_idr_mutex);
/*
 * ib_uverbs_dealloc_pd() - handle the DEALLOC_PD command: look up the
 * PD by handle (verifying it belongs to this file's ucontext), destroy
 * it via ib_dealloc_pd(), then remove the handle and unlink the
 * uobject from the context's pd_list.
 * NOTE(review): the line assigning 'uobj' (presumably pd->uobject) is
 * elided in this excerpt — confirm against the full source.
 */
347 ssize_t ib_uverbs_dealloc_pd(struct ib_uverbs_file *file,
348 const char __user *buf,
349 int in_len, int out_len)
351 struct ib_uverbs_dealloc_pd cmd;
353 struct ib_uobject *uobj;
356 if (copy_from_user(&cmd, buf, sizeof cmd))
359 mutex_lock(&ib_uverbs_idr_mutex);
/* Ownership check: the handle must map to a PD in this file's context. */
361 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
362 if (!pd || pd->uobject->context != file->ucontext)
367 ret = ib_dealloc_pd(pd);
371 idr_remove(&ib_uverbs_pd_idr, cmd.pd_handle);
373 mutex_lock(&file->mutex);
374 list_del(&uobj->list);
375 mutex_unlock(&file->mutex);
380 mutex_unlock(&ib_uverbs_idr_mutex);
382 return ret ? ret : in_len;
/*
 * ib_uverbs_reg_mr() - handle the REG_MR command: pin the userspace
 * region with ib_umem_get(), register it through the device's
 * reg_user_mr verb on the given PD, and hand a new MR handle back to
 * userspace.  On failure the pinned memory is released again.
 */
385 ssize_t ib_uverbs_reg_mr(struct ib_uverbs_file *file,
386 const char __user *buf, int in_len,
389 struct ib_uverbs_reg_mr cmd;
390 struct ib_uverbs_reg_mr_resp resp;
391 struct ib_udata udata;
392 struct ib_umem_object *obj;
397 if (out_len < sizeof resp)
400 if (copy_from_user(&cmd, buf, sizeof cmd))
403 INIT_UDATA(&udata, buf + sizeof cmd,
404 (unsigned long) cmd.response + sizeof resp,
405 in_len - sizeof cmd, out_len - sizeof resp);
/* The user VA and the HCA VA must share the same offset within a page. */
407 if ((cmd.start & ~PAGE_MASK) != (cmd.hca_va & ~PAGE_MASK))
411 * Local write permission is required if remote write or
412 * remote atomic permission is also requested.
414 if (cmd.access_flags & (IB_ACCESS_REMOTE_ATOMIC | IB_ACCESS_REMOTE_WRITE) &&
415 !(cmd.access_flags & IB_ACCESS_LOCAL_WRITE))
418 obj = kmalloc(sizeof *obj, GFP_KERNEL);
422 obj->uobject.context = file->ucontext;
425 * We ask for writable memory if any access flags other than
426 * "remote read" are set. "Local write" and "remote write"
427 * obviously require write access. "Remote atomic" can do
428 * things like fetch and add, which will modify memory, and
429 * "MW bind" can change permissions by binding a window.
431 ret = ib_umem_get(file->device->ib_dev, &obj->umem,
432 (void *) (unsigned long) cmd.start, cmd.length,
433 !!(cmd.access_flags & ~IB_ACCESS_REMOTE_READ));
437 obj->umem.virt_base = cmd.hca_va;
439 mutex_lock(&ib_uverbs_idr_mutex);
/* Ownership check: the PD handle must belong to this file's context. */
441 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
442 if (!pd || pd->uobject->context != file->ucontext) {
/* reg_user_mr is optional in the device verbs table. */
447 if (!pd->device->reg_user_mr) {
452 mr = pd->device->reg_user_mr(pd, &obj->umem, cmd.access_flags, &udata);
458 mr->device = pd->device;
460 mr->uobject = &obj->uobject;
/* The MR pins its PD for as long as it exists. */
461 atomic_inc(&pd->usecnt);
462 atomic_set(&mr->usecnt, 0);
464 memset(&resp, 0, sizeof resp);
465 resp.lkey = mr->lkey;
466 resp.rkey = mr->rkey;
468 ret = idr_add_uobj(&ib_uverbs_mr_idr, mr, &obj->uobject);
472 resp.mr_handle = obj->uobject.id;
474 if (copy_to_user((void __user *) (unsigned long) cmd.response,
475 &resp, sizeof resp)) {
480 mutex_lock(&file->mutex);
481 list_add_tail(&obj->uobject.list, &file->ucontext->mr_list);
482 mutex_unlock(&file->mutex);
484 mutex_unlock(&ib_uverbs_idr_mutex);
/* Error path: remove the handle, then unpin the umem below. */
489 idr_remove(&ib_uverbs_mr_idr, obj->uobject.id);
495 mutex_unlock(&ib_uverbs_idr_mutex);
497 ib_umem_release(file->device->ib_dev, &obj->umem);
/*
 * ib_uverbs_dereg_mr() - handle the DEREG_MR command: look up the MR
 * by handle (verifying context ownership), deregister it, drop its
 * handle, unlink its uobject from the context's mr_list, and release
 * the pinned user memory.
 */
504 ssize_t ib_uverbs_dereg_mr(struct ib_uverbs_file *file,
505 const char __user *buf, int in_len,
508 struct ib_uverbs_dereg_mr cmd;
510 struct ib_umem_object *memobj;
513 if (copy_from_user(&cmd, buf, sizeof cmd))
516 mutex_lock(&ib_uverbs_idr_mutex);
518 mr = idr_find(&ib_uverbs_mr_idr, cmd.mr_handle);
519 if (!mr || mr->uobject->context != file->ucontext)
/* Recover the umem wrapper before the MR (and its uobject pointer) is destroyed. */
522 memobj = container_of(mr->uobject, struct ib_umem_object, uobject);
524 ret = ib_dereg_mr(mr);
528 idr_remove(&ib_uverbs_mr_idr, cmd.mr_handle);
530 mutex_lock(&file->mutex);
531 list_del(&memobj->uobject.list);
532 mutex_unlock(&file->mutex);
/* Unpin the user pages now that the HCA no longer references them. */
534 ib_umem_release(file->device->ib_dev, &memobj->umem);
538 mutex_unlock(&ib_uverbs_idr_mutex);
540 return ret ? ret : in_len;
/*
 * ib_uverbs_create_comp_channel() - handle CREATE_COMP_CHANNEL:
 * allocate a completion event file and return its fd to userspace.
 * The fd is installed only after the response is successfully copied
 * out, so a failed copy can still back the fd out with put_unused_fd().
 */
543 ssize_t ib_uverbs_create_comp_channel(struct ib_uverbs_file *file,
544 const char __user *buf, int in_len,
547 struct ib_uverbs_create_comp_channel cmd;
548 struct ib_uverbs_create_comp_channel_resp resp;
551 if (out_len < sizeof resp)
554 if (copy_from_user(&cmd, buf, sizeof cmd))
/* Second argument 0 = completion (not async) event file. */
557 filp = ib_uverbs_alloc_event_file(file, 0, &resp.fd);
559 return PTR_ERR(filp);
561 if (copy_to_user((void __user *) (unsigned long) cmd.response,
562 &resp, sizeof resp)) {
563 put_unused_fd(resp.fd);
568 fd_install(resp.fd, filp);
/*
 * ib_uverbs_create_cq() - handle the CREATE_CQ command: validate the
 * completion vector, optionally bind the CQ to a completion event
 * channel, create the CQ through the device verb, and return its
 * handle.  On failure the ucq object (and its event-file reference)
 * is torn down with ib_uverbs_release_ucq().
 */
572 ssize_t ib_uverbs_create_cq(struct ib_uverbs_file *file,
573 const char __user *buf, int in_len,
576 struct ib_uverbs_create_cq cmd;
577 struct ib_uverbs_create_cq_resp resp;
578 struct ib_udata udata;
579 struct ib_ucq_object *uobj;
580 struct ib_uverbs_event_file *ev_file = NULL;
584 if (out_len < sizeof resp)
587 if (copy_from_user(&cmd, buf, sizeof cmd))
590 INIT_UDATA(&udata, buf + sizeof cmd,
591 (unsigned long) cmd.response + sizeof resp,
592 in_len - sizeof cmd, out_len - sizeof resp);
/* Reject interrupt vectors the device does not provide. */
594 if (cmd.comp_vector >= file->device->num_comp_vectors)
597 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
/* comp_channel < 0 means "no completion channel" for this CQ. */
601 if (cmd.comp_channel >= 0) {
602 ev_file = ib_uverbs_lookup_comp_file(cmd.comp_channel);
609 uobj->uobject.user_handle = cmd.user_handle;
610 uobj->uobject.context = file->ucontext;
611 uobj->uverbs_file = file;
612 uobj->comp_events_reported = 0;
613 uobj->async_events_reported = 0;
614 INIT_LIST_HEAD(&uobj->comp_list);
615 INIT_LIST_HEAD(&uobj->async_list);
617 cq = file->device->ib_dev->create_cq(file->device->ib_dev, cmd.cqe,
618 file->ucontext, &udata);
624 cq->device = file->device->ib_dev;
625 cq->uobject = &uobj->uobject;
626 cq->comp_handler = ib_uverbs_comp_handler;
627 cq->event_handler = ib_uverbs_cq_event_handler;
/* cq_context is the event file (may be NULL); used to dispatch completions. */
628 cq->cq_context = ev_file;
629 atomic_set(&cq->usecnt, 0);
631 mutex_lock(&ib_uverbs_idr_mutex);
633 ret = idr_add_uobj(&ib_uverbs_cq_idr, cq, &uobj->uobject);
637 memset(&resp, 0, sizeof resp);
638 resp.cq_handle = uobj->uobject.id;
641 if (copy_to_user((void __user *) (unsigned long) cmd.response,
642 &resp, sizeof resp)) {
647 mutex_lock(&file->mutex);
648 list_add_tail(&uobj->uobject.list, &file->ucontext->cq_list);
649 mutex_unlock(&file->mutex);
651 mutex_unlock(&ib_uverbs_idr_mutex);
/* Error path: back out the handle, then drop the ucq and channel ref. */
656 idr_remove(&ib_uverbs_cq_idr, uobj->uobject.id);
659 mutex_unlock(&ib_uverbs_idr_mutex);
664 ib_uverbs_release_ucq(file, ev_file, uobj);
/*
 * ib_uverbs_resize_cq() - handle the RESIZE_CQ command: look up the CQ
 * by handle (verifying ownership and that the device implements the
 * optional resize_cq verb) and invoke the driver resize.
 */
669 ssize_t ib_uverbs_resize_cq(struct ib_uverbs_file *file,
670 const char __user *buf, int in_len,
673 struct ib_uverbs_resize_cq cmd;
674 struct ib_uverbs_resize_cq_resp resp;
675 struct ib_udata udata;
679 if (copy_from_user(&cmd, buf, sizeof cmd))
682 INIT_UDATA(&udata, buf + sizeof cmd,
683 (unsigned long) cmd.response + sizeof resp,
684 in_len - sizeof cmd, out_len - sizeof resp);
686 mutex_lock(&ib_uverbs_idr_mutex);
/* resize_cq is optional; reject if the driver doesn't provide it. */
688 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
689 if (!cq || cq->uobject->context != file->ucontext || !cq->device->resize_cq)
692 ret = cq->device->resize_cq(cq, cmd.cqe, &udata);
696 memset(&resp, 0, sizeof resp);
699 if (copy_to_user((void __user *) (unsigned long) cmd.response,
704 mutex_unlock(&ib_uverbs_idr_mutex);
706 return ret ? ret : in_len;
/*
 * ib_uverbs_poll_cq() - handle the POLL_CQ command: poll up to cmd.ne
 * completions from the CQ into a kernel wc array, translate each
 * ib_wc into the userspace ib_uverbs_wc layout, and copy the variable-
 * length response (count + wc array) back to userspace.
 * NOTE(review): cmd.ne is user-controlled and sizes two kmalloc()s
 * (cmd.ne * sizeof *wc and the response) with no visible upper bound —
 * verify multiplication overflow / allocation size is bounded upstream.
 */
709 ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
710 const char __user *buf, int in_len,
713 struct ib_uverbs_poll_cq cmd;
714 struct ib_uverbs_poll_cq_resp *resp;
721 if (copy_from_user(&cmd, buf, sizeof cmd))
724 wc = kmalloc(cmd.ne * sizeof *wc, GFP_KERNEL);
728 rsize = sizeof *resp + cmd.ne * sizeof(struct ib_uverbs_wc);
729 resp = kmalloc(rsize, GFP_KERNEL);
735 mutex_lock(&ib_uverbs_idr_mutex);
736 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
737 if (!cq || cq->uobject->context != file->ucontext) {
742 resp->count = ib_poll_cq(cq, cmd.ne, wc);
/* Translate kernel ib_wc entries into the fixed userspace wire format. */
744 for (i = 0; i < resp->count; i++) {
745 resp->wc[i].wr_id = wc[i].wr_id;
746 resp->wc[i].status = wc[i].status;
747 resp->wc[i].opcode = wc[i].opcode;
748 resp->wc[i].vendor_err = wc[i].vendor_err;
749 resp->wc[i].byte_len = wc[i].byte_len;
/* imm_data stays in network byte order; the cast only strips __be32. */
750 resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
751 resp->wc[i].qp_num = wc[i].qp_num;
752 resp->wc[i].src_qp = wc[i].src_qp;
753 resp->wc[i].wc_flags = wc[i].wc_flags;
754 resp->wc[i].pkey_index = wc[i].pkey_index;
755 resp->wc[i].slid = wc[i].slid;
756 resp->wc[i].sl = wc[i].sl;
757 resp->wc[i].dlid_path_bits = wc[i].dlid_path_bits;
758 resp->wc[i].port_num = wc[i].port_num;
761 if (copy_to_user((void __user *) (unsigned long) cmd.response, resp, rsize))
765 mutex_unlock(&ib_uverbs_idr_mutex);
770 return ret ? ret : in_len;
/*
 * ib_uverbs_req_notify_cq() - handle the REQ_NOTIFY_CQ command: arm
 * the CQ for either solicited-only or next-completion notification.
 * An invalid handle is silently ignored here (no error branch visible
 * in this excerpt) — only a valid, owned CQ is armed.
 */
773 ssize_t ib_uverbs_req_notify_cq(struct ib_uverbs_file *file,
774 const char __user *buf, int in_len,
777 struct ib_uverbs_req_notify_cq cmd;
781 if (copy_from_user(&cmd, buf, sizeof cmd))
784 mutex_lock(&ib_uverbs_idr_mutex);
785 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
786 if (cq && cq->uobject->context == file->ucontext) {
787 ib_req_notify_cq(cq, cmd.solicited_only ?
788 IB_CQ_SOLICITED : IB_CQ_NEXT_COMP);
791 mutex_unlock(&ib_uverbs_idr_mutex);
/*
 * ib_uverbs_destroy_cq() - handle the DESTROY_CQ command: destroy the
 * CQ, drop its handle and list linkage, release pending events via
 * ib_uverbs_release_ucq(), and report how many completion/async events
 * were delivered so userspace can drain its event queues accurately.
 */
796 ssize_t ib_uverbs_destroy_cq(struct ib_uverbs_file *file,
797 const char __user *buf, int in_len,
800 struct ib_uverbs_destroy_cq cmd;
801 struct ib_uverbs_destroy_cq_resp resp;
803 struct ib_ucq_object *uobj;
804 struct ib_uverbs_event_file *ev_file;
808 if (copy_from_user(&cmd, buf, sizeof cmd))
811 memset(&resp, 0, sizeof resp);
813 mutex_lock(&ib_uverbs_idr_mutex);
815 cq = idr_find(&ib_uverbs_cq_idr, cmd.cq_handle);
816 if (!cq || cq->uobject->context != file->ucontext)
/* Capture wrapper/event-file pointers before ib_destroy_cq() frees the CQ. */
819 user_handle = cq->uobject->user_handle;
820 uobj = container_of(cq->uobject, struct ib_ucq_object, uobject);
821 ev_file = cq->cq_context;
823 ret = ib_destroy_cq(cq);
827 idr_remove(&ib_uverbs_cq_idr, cmd.cq_handle);
829 mutex_lock(&file->mutex);
830 list_del(&uobj->uobject.list);
831 mutex_unlock(&file->mutex);
/* Flush queued events and drop the completion-channel reference. */
833 ib_uverbs_release_ucq(file, ev_file, uobj);
835 resp.comp_events_reported = uobj->comp_events_reported;
836 resp.async_events_reported = uobj->async_events_reported;
840 if (copy_to_user((void __user *) (unsigned long) cmd.response,
845 mutex_unlock(&ib_uverbs_idr_mutex);
847 return ret ? ret : in_len;
/*
 * ib_uverbs_create_qp() - handle the CREATE_QP command: resolve the
 * PD, send/recv CQs and optional SRQ handles (all must belong to this
 * file's ucontext), build an ib_qp_init_attr from the command, create
 * the QP through the device verb, bump the refcounts of every object
 * the QP depends on, and return the QP handle and actual capabilities.
 */
850 ssize_t ib_uverbs_create_qp(struct ib_uverbs_file *file,
851 const char __user *buf, int in_len,
854 struct ib_uverbs_create_qp cmd;
855 struct ib_uverbs_create_qp_resp resp;
856 struct ib_udata udata;
857 struct ib_uqp_object *uobj;
859 struct ib_cq *scq, *rcq;
862 struct ib_qp_init_attr attr;
865 if (out_len < sizeof resp)
868 if (copy_from_user(&cmd, buf, sizeof cmd))
871 INIT_UDATA(&udata, buf + sizeof cmd,
872 (unsigned long) cmd.response + sizeof resp,
873 in_len - sizeof cmd, out_len - sizeof resp);
875 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
879 mutex_lock(&ib_uverbs_idr_mutex);
/* All referenced objects are resolved under the idr mutex... */
881 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
882 scq = idr_find(&ib_uverbs_cq_idr, cmd.send_cq_handle);
883 rcq = idr_find(&ib_uverbs_cq_idr, cmd.recv_cq_handle);
884 srq = cmd.is_srq ? idr_find(&ib_uverbs_srq_idr, cmd.srq_handle) : NULL;
/* ...and each must exist and belong to this file's context. */
886 if (!pd || pd->uobject->context != file->ucontext ||
887 !scq || scq->uobject->context != file->ucontext ||
888 !rcq || rcq->uobject->context != file->ucontext ||
889 (cmd.is_srq && (!srq || srq->uobject->context != file->ucontext))) {
894 attr.event_handler = ib_uverbs_qp_event_handler;
895 attr.qp_context = file;
899 attr.sq_sig_type = cmd.sq_sig_all ? IB_SIGNAL_ALL_WR : IB_SIGNAL_REQ_WR;
900 attr.qp_type = cmd.qp_type;
902 attr.cap.max_send_wr = cmd.max_send_wr;
903 attr.cap.max_recv_wr = cmd.max_recv_wr;
904 attr.cap.max_send_sge = cmd.max_send_sge;
905 attr.cap.max_recv_sge = cmd.max_recv_sge;
906 attr.cap.max_inline_data = cmd.max_inline_data;
908 uobj->uevent.uobject.user_handle = cmd.user_handle;
909 uobj->uevent.uobject.context = file->ucontext;
910 uobj->uevent.events_reported = 0;
911 INIT_LIST_HEAD(&uobj->uevent.event_list);
912 INIT_LIST_HEAD(&uobj->mcast_list);
914 qp = pd->device->create_qp(pd, &attr, &udata);
920 qp->device = pd->device;
922 qp->send_cq = attr.send_cq;
923 qp->recv_cq = attr.recv_cq;
925 qp->uobject = &uobj->uevent.uobject;
926 qp->event_handler = attr.event_handler;
927 qp->qp_context = attr.qp_context;
928 qp->qp_type = attr.qp_type;
/* The QP pins its PD, both CQs, and the SRQ (conditional line elided). */
929 atomic_inc(&pd->usecnt);
930 atomic_inc(&attr.send_cq->usecnt);
931 atomic_inc(&attr.recv_cq->usecnt);
933 atomic_inc(&attr.srq->usecnt);
935 memset(&resp, 0, sizeof resp);
936 resp.qpn = qp->qp_num;
938 ret = idr_add_uobj(&ib_uverbs_qp_idr, qp, &uobj->uevent.uobject);
/* Report the capabilities the driver actually granted, not what was asked. */
942 resp.qp_handle = uobj->uevent.uobject.id;
943 resp.max_recv_sge = attr.cap.max_recv_sge;
944 resp.max_send_sge = attr.cap.max_send_sge;
945 resp.max_recv_wr = attr.cap.max_recv_wr;
946 resp.max_send_wr = attr.cap.max_send_wr;
947 resp.max_inline_data = attr.cap.max_inline_data;
949 if (copy_to_user((void __user *) (unsigned long) cmd.response,
950 &resp, sizeof resp)) {
955 mutex_lock(&file->mutex);
956 list_add_tail(&uobj->uevent.uobject.list, &file->ucontext->qp_list);
957 mutex_unlock(&file->mutex);
959 mutex_unlock(&ib_uverbs_idr_mutex);
/* Error path: remove the half-published handle before teardown. */
964 idr_remove(&ib_uverbs_qp_idr, uobj->uevent.uobject.id);
970 mutex_unlock(&ib_uverbs_idr_mutex);
/*
 * ib_uverbs_query_qp() - handle the QUERY_QP command: query the QP's
 * current attributes and init attributes via ib_query_qp() and marshal
 * them (including both primary and alternate path address vectors)
 * into the userspace response.  attr/init_attr are heap-allocated to
 * keep this deep struct pair off the kernel stack.
 */
976 ssize_t ib_uverbs_query_qp(struct ib_uverbs_file *file,
977 const char __user *buf, int in_len,
980 struct ib_uverbs_query_qp cmd;
981 struct ib_uverbs_query_qp_resp resp;
983 struct ib_qp_attr *attr;
984 struct ib_qp_init_attr *init_attr;
987 if (copy_from_user(&cmd, buf, sizeof cmd))
990 attr = kmalloc(sizeof *attr, GFP_KERNEL);
991 init_attr = kmalloc(sizeof *init_attr, GFP_KERNEL);
992 if (!attr || !init_attr) {
997 mutex_lock(&ib_uverbs_idr_mutex);
999 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1000 if (qp && qp->uobject->context == file->ucontext)
1001 ret = ib_query_qp(qp, attr, cmd.attr_mask, init_attr);
/* Drop the lock before the (potentially slow) copy_to_user below. */
1005 mutex_unlock(&ib_uverbs_idr_mutex);
1010 memset(&resp, 0, sizeof resp);
1012 resp.qp_state = attr->qp_state;
1013 resp.cur_qp_state = attr->cur_qp_state;
1014 resp.path_mtu = attr->path_mtu;
1015 resp.path_mig_state = attr->path_mig_state;
1016 resp.qkey = attr->qkey;
1017 resp.rq_psn = attr->rq_psn;
1018 resp.sq_psn = attr->sq_psn;
1019 resp.dest_qp_num = attr->dest_qp_num;
1020 resp.qp_access_flags = attr->qp_access_flags;
1021 resp.pkey_index = attr->pkey_index;
1022 resp.alt_pkey_index = attr->alt_pkey_index;
1023 resp.en_sqd_async_notify = attr->en_sqd_async_notify;
1024 resp.max_rd_atomic = attr->max_rd_atomic;
1025 resp.max_dest_rd_atomic = attr->max_dest_rd_atomic;
1026 resp.min_rnr_timer = attr->min_rnr_timer;
1027 resp.port_num = attr->port_num;
1028 resp.timeout = attr->timeout;
1029 resp.retry_cnt = attr->retry_cnt;
1030 resp.rnr_retry = attr->rnr_retry;
1031 resp.alt_port_num = attr->alt_port_num;
1032 resp.alt_timeout = attr->alt_timeout;
/* Primary path address vector (GRH fields are only meaningful if is_global). */
1034 memcpy(resp.dest.dgid, attr->ah_attr.grh.dgid.raw, 16);
1035 resp.dest.flow_label = attr->ah_attr.grh.flow_label;
1036 resp.dest.sgid_index = attr->ah_attr.grh.sgid_index;
1037 resp.dest.hop_limit = attr->ah_attr.grh.hop_limit;
1038 resp.dest.traffic_class = attr->ah_attr.grh.traffic_class;
1039 resp.dest.dlid = attr->ah_attr.dlid;
1040 resp.dest.sl = attr->ah_attr.sl;
1041 resp.dest.src_path_bits = attr->ah_attr.src_path_bits;
1042 resp.dest.static_rate = attr->ah_attr.static_rate;
1043 resp.dest.is_global = !!(attr->ah_attr.ah_flags & IB_AH_GRH);
1044 resp.dest.port_num = attr->ah_attr.port_num;
/* Alternate path address vector, same layout as the primary. */
1046 memcpy(resp.alt_dest.dgid, attr->alt_ah_attr.grh.dgid.raw, 16);
1047 resp.alt_dest.flow_label = attr->alt_ah_attr.grh.flow_label;
1048 resp.alt_dest.sgid_index = attr->alt_ah_attr.grh.sgid_index;
1049 resp.alt_dest.hop_limit = attr->alt_ah_attr.grh.hop_limit;
1050 resp.alt_dest.traffic_class = attr->alt_ah_attr.grh.traffic_class;
1051 resp.alt_dest.dlid = attr->alt_ah_attr.dlid;
1052 resp.alt_dest.sl = attr->alt_ah_attr.sl;
1053 resp.alt_dest.src_path_bits = attr->alt_ah_attr.src_path_bits;
1054 resp.alt_dest.static_rate = attr->alt_ah_attr.static_rate;
1055 resp.alt_dest.is_global = !!(attr->alt_ah_attr.ah_flags & IB_AH_GRH);
1056 resp.alt_dest.port_num = attr->alt_ah_attr.port_num;
1058 resp.max_send_wr = init_attr->cap.max_send_wr;
1059 resp.max_recv_wr = init_attr->cap.max_recv_wr;
1060 resp.max_send_sge = init_attr->cap.max_send_sge;
1061 resp.max_recv_sge = init_attr->cap.max_recv_sge;
1062 resp.max_inline_data = init_attr->cap.max_inline_data;
1063 resp.sq_sig_all = init_attr->sq_sig_type == IB_SIGNAL_ALL_WR;
1065 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1066 &resp, sizeof resp))
1073 return ret ? ret : in_len;
/*
 * ib_uverbs_modify_qp() - handle the MODIFY_QP command: translate the
 * userspace attribute structure (including primary and alternate path
 * address vectors) into an ib_qp_attr and apply it with ib_modify_qp()
 * under cmd.attr_mask.  attr is heap-allocated to keep the large
 * struct off the kernel stack.
 */
1076 ssize_t ib_uverbs_modify_qp(struct ib_uverbs_file *file,
1077 const char __user *buf, int in_len,
1080 struct ib_uverbs_modify_qp cmd;
1082 struct ib_qp_attr *attr;
1085 if (copy_from_user(&cmd, buf, sizeof cmd))
1088 attr = kmalloc(sizeof *attr, GFP_KERNEL);
1092 mutex_lock(&ib_uverbs_idr_mutex);
1094 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1095 if (!qp || qp->uobject->context != file->ucontext) {
1100 attr->qp_state = cmd.qp_state;
1101 attr->cur_qp_state = cmd.cur_qp_state;
1102 attr->path_mtu = cmd.path_mtu;
1103 attr->path_mig_state = cmd.path_mig_state;
1104 attr->qkey = cmd.qkey;
1105 attr->rq_psn = cmd.rq_psn;
1106 attr->sq_psn = cmd.sq_psn;
1107 attr->dest_qp_num = cmd.dest_qp_num;
1108 attr->qp_access_flags = cmd.qp_access_flags;
1109 attr->pkey_index = cmd.pkey_index;
1110 attr->alt_pkey_index = cmd.alt_pkey_index;
1111 attr->en_sqd_async_notify = cmd.en_sqd_async_notify;
1112 attr->max_rd_atomic = cmd.max_rd_atomic;
1113 attr->max_dest_rd_atomic = cmd.max_dest_rd_atomic;
1114 attr->min_rnr_timer = cmd.min_rnr_timer;
1115 attr->port_num = cmd.port_num;
1116 attr->timeout = cmd.timeout;
1117 attr->retry_cnt = cmd.retry_cnt;
1118 attr->rnr_retry = cmd.rnr_retry;
1119 attr->alt_port_num = cmd.alt_port_num;
1120 attr->alt_timeout = cmd.alt_timeout;
/* Primary path address vector from userspace. */
1122 memcpy(attr->ah_attr.grh.dgid.raw, cmd.dest.dgid, 16);
1123 attr->ah_attr.grh.flow_label = cmd.dest.flow_label;
1124 attr->ah_attr.grh.sgid_index = cmd.dest.sgid_index;
1125 attr->ah_attr.grh.hop_limit = cmd.dest.hop_limit;
1126 attr->ah_attr.grh.traffic_class = cmd.dest.traffic_class;
1127 attr->ah_attr.dlid = cmd.dest.dlid;
1128 attr->ah_attr.sl = cmd.dest.sl;
1129 attr->ah_attr.src_path_bits = cmd.dest.src_path_bits;
1130 attr->ah_attr.static_rate = cmd.dest.static_rate;
1131 attr->ah_attr.ah_flags = cmd.dest.is_global ? IB_AH_GRH : 0;
1132 attr->ah_attr.port_num = cmd.dest.port_num;
/* Alternate path address vector from userspace. */
1134 memcpy(attr->alt_ah_attr.grh.dgid.raw, cmd.alt_dest.dgid, 16);
1135 attr->alt_ah_attr.grh.flow_label = cmd.alt_dest.flow_label;
1136 attr->alt_ah_attr.grh.sgid_index = cmd.alt_dest.sgid_index;
1137 attr->alt_ah_attr.grh.hop_limit = cmd.alt_dest.hop_limit;
1138 attr->alt_ah_attr.grh.traffic_class = cmd.alt_dest.traffic_class;
1139 attr->alt_ah_attr.dlid = cmd.alt_dest.dlid;
1140 attr->alt_ah_attr.sl = cmd.alt_dest.sl;
1141 attr->alt_ah_attr.src_path_bits = cmd.alt_dest.src_path_bits;
1142 attr->alt_ah_attr.static_rate = cmd.alt_dest.static_rate;
1143 attr->alt_ah_attr.ah_flags = cmd.alt_dest.is_global ? IB_AH_GRH : 0;
1144 attr->alt_ah_attr.port_num = cmd.alt_dest.port_num;
1146 ret = ib_modify_qp(qp, attr, cmd.attr_mask);
1153 mutex_unlock(&ib_uverbs_idr_mutex);
/*
 * ib_uverbs_destroy_qp() - handle the DESTROY_QP command: refuse to
 * destroy a QP still attached to multicast groups, otherwise destroy
 * it, drop its handle, release queued async events, and report the
 * number of events delivered so userspace can drain its queue.
 */
1159 ssize_t ib_uverbs_destroy_qp(struct ib_uverbs_file *file,
1160 const char __user *buf, int in_len,
1163 struct ib_uverbs_destroy_qp cmd;
1164 struct ib_uverbs_destroy_qp_resp resp;
1166 struct ib_uqp_object *uobj;
1169 if (copy_from_user(&cmd, buf, sizeof cmd))
1172 memset(&resp, 0, sizeof resp);
1174 mutex_lock(&ib_uverbs_idr_mutex);
1176 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1177 if (!qp || qp->uobject->context != file->ucontext)
1180 uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
/* A QP with live multicast attachments must be detached first. */
1182 if (!list_empty(&uobj->mcast_list)) {
1187 ret = ib_destroy_qp(qp);
1191 idr_remove(&ib_uverbs_qp_idr, cmd.qp_handle);
1193 mutex_lock(&file->mutex);
1194 list_del(&uobj->uevent.uobject.list);
1195 mutex_unlock(&file->mutex);
1197 ib_uverbs_release_uevent(file, &uobj->uevent);
1199 resp.events_reported = uobj->uevent.events_reported;
1203 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1204 &resp, sizeof resp))
1208 mutex_unlock(&ib_uverbs_idr_mutex);
1210 return ret ? ret : in_len;
/*
 * ib_uverbs_post_send() - handle the POST_SEND command: unmarshal a
 * chain of userspace send work requests (wr_count WRs of wqe_size
 * bytes, followed by sge_count scatter/gather entries), build the
 * kernel ib_send_wr list, and post it with the device's post_send
 * verb.  The WR list is freed again after posting (cleanup loop lines
 * elided in this excerpt).
 */
1213 ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
1214 const char __user *buf, int in_len,
1217 struct ib_uverbs_post_send cmd;
1218 struct ib_uverbs_post_send_resp resp;
1219 struct ib_uverbs_send_wr *user_wr;
1220 struct ib_send_wr *wr = NULL, *last, *next, *bad_wr;
1223 ssize_t ret = -EINVAL;
1225 if (copy_from_user(&cmd, buf, sizeof cmd))
/* Input must be large enough for all declared WRs plus all SGEs. */
1228 if (in_len < sizeof cmd + cmd.wqe_size * cmd.wr_count +
1229 cmd.sge_count * sizeof (struct ib_uverbs_sge))
1232 if (cmd.wqe_size < sizeof (struct ib_uverbs_send_wr))
/* Scratch buffer reused for each user WR copy. */
1235 user_wr = kmalloc(cmd.wqe_size, GFP_KERNEL);
1239 mutex_lock(&ib_uverbs_idr_mutex);
1241 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1242 if (!qp || qp->uobject->context != file->ucontext)
1247 for (i = 0; i < cmd.wr_count; ++i) {
1248 if (copy_from_user(user_wr,
1249 buf + sizeof cmd + i * cmd.wqe_size,
/* Running SGE total must not exceed what the command declared. */
1255 if (user_wr->num_sge + sg_ind > cmd.sge_count) {
/* Allocate WR and its SGE array in one chunk, SGEs aligned after the WR. */
1260 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1261 user_wr->num_sge * sizeof (struct ib_sge),
1275 next->wr_id = user_wr->wr_id;
1276 next->num_sge = user_wr->num_sge;
1277 next->opcode = user_wr->opcode;
1278 next->send_flags = user_wr->send_flags;
1279 next->imm_data = (__be32 __force) user_wr->imm_data;
1281 if (qp->qp_type == IB_QPT_UD) {
/* NOTE(review): no context-ownership check on the AH handle is visible
 * in this excerpt — verify the lookup validates ownership upstream. */
1282 next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
1284 if (!next->wr.ud.ah) {
1288 next->wr.ud.remote_qpn = user_wr->wr.ud.remote_qpn;
1289 next->wr.ud.remote_qkey = user_wr->wr.ud.remote_qkey;
/* Non-UD QPs: fill the opcode-specific union member. */
1291 switch (next->opcode) {
1292 case IB_WR_RDMA_WRITE:
1293 case IB_WR_RDMA_WRITE_WITH_IMM:
1294 case IB_WR_RDMA_READ:
1295 next->wr.rdma.remote_addr =
1296 user_wr->wr.rdma.remote_addr;
1297 next->wr.rdma.rkey =
1298 user_wr->wr.rdma.rkey;
1300 case IB_WR_ATOMIC_CMP_AND_SWP:
1301 case IB_WR_ATOMIC_FETCH_AND_ADD:
1302 next->wr.atomic.remote_addr =
1303 user_wr->wr.atomic.remote_addr;
1304 next->wr.atomic.compare_add =
1305 user_wr->wr.atomic.compare_add;
1306 next->wr.atomic.swap = user_wr->wr.atomic.swap;
1307 next->wr.atomic.rkey = user_wr->wr.atomic.rkey;
1314 if (next->num_sge) {
/* SGEs live in the tail of the same allocation as the WR. */
1315 next->sg_list = (void *) next +
1316 ALIGN(sizeof *next, sizeof (struct ib_sge));
1317 if (copy_from_user(next->sg_list,
1319 cmd.wr_count * cmd.wqe_size +
1320 sg_ind * sizeof (struct ib_sge),
1321 next->num_sge * sizeof (struct ib_sge))) {
1325 sg_ind += next->num_sge;
1327 next->sg_list = NULL;
1331 ret = qp->device->post_send(qp, wr, &bad_wr);
1333 for (next = wr; next; next = next->next) {
1339 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1340 &resp, sizeof resp))
1344 mutex_unlock(&ib_uverbs_idr_mutex);
1354 return ret ? ret : in_len;
/*
 * ib_uverbs_unmarshall_recv() - build a kernel ib_recv_wr chain from a
 * userspace buffer laid out as wr_count WRs of wqe_size bytes followed
 * by sge_count scatter/gather entries.  Shared by post_recv and
 * post_srq_recv.  Returns the head of the chain or an ERR_PTR; on
 * failure the partially built chain is freed (cleanup lines elided in
 * this excerpt).
 */
1357 static struct ib_recv_wr *ib_uverbs_unmarshall_recv(const char __user *buf,
1363 struct ib_uverbs_recv_wr *user_wr;
1364 struct ib_recv_wr *wr = NULL, *last, *next;
/* Input must cover all declared WRs plus all SGEs. */
1369 if (in_len < wqe_size * wr_count +
1370 sge_count * sizeof (struct ib_uverbs_sge))
1371 return ERR_PTR(-EINVAL);
1373 if (wqe_size < sizeof (struct ib_uverbs_recv_wr))
1374 return ERR_PTR(-EINVAL);
1376 user_wr = kmalloc(wqe_size, GFP_KERNEL);
1378 return ERR_PTR(-ENOMEM);
1382 for (i = 0; i < wr_count; ++i) {
1383 if (copy_from_user(user_wr, buf + i * wqe_size,
/* Running SGE total must not exceed the declared sge_count. */
1389 if (user_wr->num_sge + sg_ind > sge_count) {
/* WR and its SGE array share one allocation; SGEs aligned after the WR. */
1394 next = kmalloc(ALIGN(sizeof *next, sizeof (struct ib_sge)) +
1395 user_wr->num_sge * sizeof (struct ib_sge),
1409 next->wr_id = user_wr->wr_id;
1410 next->num_sge = user_wr->num_sge;
1412 if (next->num_sge) {
1413 next->sg_list = (void *) next +
1414 ALIGN(sizeof *next, sizeof (struct ib_sge));
1415 if (copy_from_user(next->sg_list,
1416 buf + wr_count * wqe_size +
1417 sg_ind * sizeof (struct ib_sge),
1418 next->num_sge * sizeof (struct ib_sge))) {
1422 sg_ind += next->num_sge;
1424 next->sg_list = NULL;
1439 return ERR_PTR(ret);
/*
 * ib_uverbs_post_recv() - handle the POST_RECV command: unmarshal the
 * receive WR chain with ib_uverbs_unmarshall_recv(), post it on the QP
 * via the device's post_recv verb, and free the chain afterwards
 * (cleanup loop lines elided in this excerpt).
 */
1442 ssize_t ib_uverbs_post_recv(struct ib_uverbs_file *file,
1443 const char __user *buf, int in_len,
1446 struct ib_uverbs_post_recv cmd;
1447 struct ib_uverbs_post_recv_resp resp;
1448 struct ib_recv_wr *wr, *next, *bad_wr;
1450 ssize_t ret = -EINVAL;
1452 if (copy_from_user(&cmd, buf, sizeof cmd))
1455 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1456 in_len - sizeof cmd, cmd.wr_count,
1457 cmd.sge_count, cmd.wqe_size);
1461 mutex_lock(&ib_uverbs_idr_mutex);
1463 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1464 if (!qp || qp->uobject->context != file->ucontext)
1468 ret = qp->device->post_recv(qp, wr, &bad_wr);
1470 for (next = wr; next; next = next->next) {
1477 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1478 &resp, sizeof resp))
1482 mutex_unlock(&ib_uverbs_idr_mutex);
1490 return ret ? ret : in_len;
/*
 * Handle the POST_SRQ_RECV uverbs command: identical flow to
 * ib_uverbs_post_recv(), but the work requests are posted to a shared
 * receive queue (SRQ) looked up in ib_uverbs_srq_idr instead of a QP.
 *
 * NOTE(review): elided extract -- error paths and WR-list cleanup fall
 * in the gaps between the embedded original line numbers.
 */
1493 ssize_t ib_uverbs_post_srq_recv(struct ib_uverbs_file *file,
1494 const char __user *buf, int in_len,
1497 struct ib_uverbs_post_srq_recv cmd;
1498 struct ib_uverbs_post_srq_recv_resp resp;
1499 struct ib_recv_wr *wr, *next, *bad_wr;
1501 ssize_t ret = -EINVAL;
1503 if (copy_from_user(&cmd, buf, sizeof cmd))
/* Reuse the generic recv-WR unmarshaller; payload follows the header. */
1506 wr = ib_uverbs_unmarshall_recv(buf + sizeof cmd,
1507 in_len - sizeof cmd, cmd.wr_count,
1508 cmd.sge_count, cmd.wqe_size);
1512 mutex_lock(&ib_uverbs_idr_mutex);
1514 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
/* Handle must exist and belong to this process's ucontext. */
1515 if (!srq || srq->uobject->context != file->ucontext)
1519 ret = srq->device->post_srq_recv(srq, wr, &bad_wr);
1521 for (next = wr; next; next = next->next) {
1528 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1529 &resp, sizeof resp))
1533 mutex_unlock(&ib_uverbs_idr_mutex);
1541 return ret ? ret : in_len;
/*
 * Handle the CREATE_AH uverbs command: translate the userspace address
 * vector into a kernel ib_ah_attr, create the address handle on the
 * given PD, register it in the AH IDR, and return the new handle to
 * userspace.
 *
 * NOTE(review): elided extract -- kmalloc/ib_create_ah error checks and
 * the err_destroy/err_up unwind labels sit in the numbering gaps.
 */
1544 ssize_t ib_uverbs_create_ah(struct ib_uverbs_file *file,
1545 const char __user *buf, int in_len,
1548 struct ib_uverbs_create_ah cmd;
1549 struct ib_uverbs_create_ah_resp resp;
1550 struct ib_uobject *uobj;
1553 struct ib_ah_attr attr;
/* Userspace must supply room for the full response. */
1556 if (out_len < sizeof resp)
1559 if (copy_from_user(&cmd, buf, sizeof cmd))
1562 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1566 mutex_lock(&ib_uverbs_idr_mutex);
/* The PD handle must exist and belong to this process's ucontext. */
1568 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
1569 if (!pd || pd->uobject->context != file->ucontext) {
1574 uobj->user_handle = cmd.user_handle;
1575 uobj->context = file->ucontext;
/* Copy the ABI address-vector fields into the kernel attr struct. */
1577 attr.dlid = cmd.attr.dlid;
1578 attr.sl = cmd.attr.sl;
1579 attr.src_path_bits = cmd.attr.src_path_bits;
1580 attr.static_rate = cmd.attr.static_rate;
/* GRH (global routing header) is only meaningful when is_global set. */
1581 attr.ah_flags = cmd.attr.is_global ? IB_AH_GRH : 0;
1582 attr.port_num = cmd.attr.port_num;
1583 attr.grh.flow_label = cmd.attr.grh.flow_label;
1584 attr.grh.sgid_index = cmd.attr.grh.sgid_index;
1585 attr.grh.hop_limit = cmd.attr.grh.hop_limit;
1586 attr.grh.traffic_class = cmd.attr.grh.traffic_class;
1587 memcpy(attr.grh.dgid.raw, cmd.attr.grh.dgid, 16);
1589 ah = ib_create_ah(pd, &attr);
/* Publish the AH in the IDR; uobj->id becomes the userspace handle. */
1597 ret = idr_add_uobj(&ib_uverbs_ah_idr, ah, uobj);
1601 resp.ah_handle = uobj->id;
1603 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1604 &resp, sizeof resp)) {
/* file->mutex guards the per-context object lists. */
1609 mutex_lock(&file->mutex);
1610 list_add_tail(&uobj->list, &file->ucontext->ah_list);
1611 mutex_unlock(&file->mutex);
1613 mutex_unlock(&ib_uverbs_idr_mutex);
/* Error unwind (label elided): undo the IDR insertion. */
1618 idr_remove(&ib_uverbs_ah_idr, uobj->id);
1624 mutex_unlock(&ib_uverbs_idr_mutex);
/*
 * Handle the DESTROY_AH uverbs command: look up the address handle,
 * destroy it, and remove its handle and uobject bookkeeping.
 *
 * NOTE(review): elided extract -- the assignment of uobj (presumably
 * uobj = ah->uobject at the missing line between 1644 and 1649), the
 * ib_destroy_ah() error check, and kfree(uobj) are not visible here.
 */
1630 ssize_t ib_uverbs_destroy_ah(struct ib_uverbs_file *file,
1631 const char __user *buf, int in_len, int out_len)
1633 struct ib_uverbs_destroy_ah cmd;
1635 struct ib_uobject *uobj;
1638 if (copy_from_user(&cmd, buf, sizeof cmd))
1641 mutex_lock(&ib_uverbs_idr_mutex);
/* Handle must exist and belong to this process's ucontext. */
1643 ah = idr_find(&ib_uverbs_ah_idr, cmd.ah_handle);
1644 if (!ah || ah->uobject->context != file->ucontext)
1649 ret = ib_destroy_ah(ah);
/* Only after successful destroy: retire the handle and list entry. */
1653 idr_remove(&ib_uverbs_ah_idr, cmd.ah_handle);
1655 mutex_lock(&file->mutex);
1656 list_del(&uobj->list);
1657 mutex_unlock(&file->mutex);
1662 mutex_unlock(&ib_uverbs_idr_mutex);
1664 return ret ? ret : in_len;
/*
 * Handle the ATTACH_MCAST uverbs command: attach the QP to a multicast
 * group (GID + MLID).  A per-QP mcast_list records each attachment so it
 * can be torn down on QP destruction; attaching an already-listed group
 * is treated as a no-op rather than duplicated.
 *
 * NOTE(review): elided extract -- the duplicate-found early-out body,
 * kmalloc failure check and the kfree on attach failure fall in the
 * numbering gaps.
 */
1667 ssize_t ib_uverbs_attach_mcast(struct ib_uverbs_file *file,
1668 const char __user *buf, int in_len,
1671 struct ib_uverbs_attach_mcast cmd;
1673 struct ib_uqp_object *uobj;
1674 struct ib_uverbs_mcast_entry *mcast;
1677 if (copy_from_user(&cmd, buf, sizeof cmd))
1680 mutex_lock(&ib_uverbs_idr_mutex);
1682 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1683 if (!qp || qp->uobject->context != file->ucontext)
1686 uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
/* Scan for an existing attachment to the same (GID, MLID) pair. */
1688 list_for_each_entry(mcast, &uobj->mcast_list, list)
1689 if (cmd.mlid == mcast->lid &&
1690 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
1695 mcast = kmalloc(sizeof *mcast, GFP_KERNEL);
1701 mcast->lid = cmd.mlid;
1702 memcpy(mcast->gid.raw, cmd.gid, sizeof mcast->gid.raw);
1704 ret = ib_attach_mcast(qp, &mcast->gid, cmd.mlid);
/* On success, remember the attachment for cleanup at QP destroy. */
1706 uobj = container_of(qp->uobject, struct ib_uqp_object,
1708 list_add_tail(&mcast->list, &uobj->mcast_list);
1713 mutex_unlock(&ib_uverbs_idr_mutex);
1715 return ret ? ret : in_len;
/*
 * Handle the DETACH_MCAST uverbs command: detach the QP from the given
 * multicast group, then drop the matching bookkeeping entry from the
 * per-QP mcast_list.
 *
 * NOTE(review): elided extract -- the ib_detach_mcast() error check,
 * the kfree of the removed entry, and the loop's break are not visible
 * in the numbering gaps.
 */
1718 ssize_t ib_uverbs_detach_mcast(struct ib_uverbs_file *file,
1719 const char __user *buf, int in_len,
1722 struct ib_uverbs_detach_mcast cmd;
1723 struct ib_uqp_object *uobj;
1725 struct ib_uverbs_mcast_entry *mcast;
1728 if (copy_from_user(&cmd, buf, sizeof cmd))
1731 mutex_lock(&ib_uverbs_idr_mutex);
1733 qp = idr_find(&ib_uverbs_qp_idr, cmd.qp_handle);
1734 if (!qp || qp->uobject->context != file->ucontext)
/* Detach from the HW first; list cleanup only happens afterwards. */
1737 ret = ib_detach_mcast(qp, (union ib_gid *) cmd.gid, cmd.mlid);
1741 uobj = container_of(qp->uobject, struct ib_uqp_object, uevent.uobject);
/* Find and unlink the matching (GID, MLID) bookkeeping entry. */
1743 list_for_each_entry(mcast, &uobj->mcast_list, list)
1744 if (cmd.mlid == mcast->lid &&
1745 !memcmp(cmd.gid, mcast->gid.raw, sizeof mcast->gid.raw)) {
1746 list_del(&mcast->list);
1752 mutex_unlock(&ib_uverbs_idr_mutex);
1754 return ret ? ret : in_len;
/*
 * Handle the CREATE_SRQ uverbs command: create a shared receive queue on
 * the given PD via the device driver (passing driver-private udata
 * through), initialize the kernel-side SRQ fields, register the SRQ in
 * the IDR, and return the handle plus the possibly-adjusted attributes.
 *
 * NOTE(review): elided extract -- kmalloc/IS_ERR checks and the
 * err_destroy/err_up unwind labels sit in the numbering gaps.
 */
1757 ssize_t ib_uverbs_create_srq(struct ib_uverbs_file *file,
1758 const char __user *buf, int in_len,
1761 struct ib_uverbs_create_srq cmd;
1762 struct ib_uverbs_create_srq_resp resp;
1763 struct ib_udata udata;
1764 struct ib_uevent_object *uobj;
1767 struct ib_srq_init_attr attr;
1770 if (out_len < sizeof resp)
1773 if (copy_from_user(&cmd, buf, sizeof cmd))
/* Driver-private in/out data sits after the cmd header and response. */
1776 INIT_UDATA(&udata, buf + sizeof cmd,
1777 (unsigned long) cmd.response + sizeof resp,
1778 in_len - sizeof cmd, out_len - sizeof resp);
1780 uobj = kmalloc(sizeof *uobj, GFP_KERNEL);
1784 mutex_lock(&ib_uverbs_idr_mutex);
/* The PD handle must exist and belong to this process's ucontext. */
1786 pd = idr_find(&ib_uverbs_pd_idr, cmd.pd_handle);
1788 if (!pd || pd->uobject->context != file->ucontext) {
1793 attr.event_handler = ib_uverbs_srq_event_handler;
1794 attr.srq_context = file;
1795 attr.attr.max_wr = cmd.max_wr;
1796 attr.attr.max_sge = cmd.max_sge;
1797 attr.attr.srq_limit = cmd.srq_limit;
1799 uobj->uobject.user_handle = cmd.user_handle;
1800 uobj->uobject.context = file->ucontext;
/* Uevent bookkeeping: async SRQ events are queued on event_list. */
1801 uobj->events_reported = 0;
1802 INIT_LIST_HEAD(&uobj->event_list);
1804 srq = pd->device->create_srq(pd, &attr, &udata);
/* Driver only allocates; core fills in the common SRQ fields. */
1810 srq->device = pd->device;
1812 srq->uobject = &uobj->uobject;
1813 srq->event_handler = attr.event_handler;
1814 srq->srq_context = attr.srq_context;
1815 atomic_inc(&pd->usecnt);
1816 atomic_set(&srq->usecnt, 0);
1818 memset(&resp, 0, sizeof resp);
/* Publish in the IDR; uobject.id becomes the userspace handle. */
1820 ret = idr_add_uobj(&ib_uverbs_srq_idr, srq, &uobj->uobject);
1824 resp.srq_handle = uobj->uobject.id;
/* Report back possibly driver-adjusted capacities. */
1825 resp.max_wr = attr.attr.max_wr;
1826 resp.max_sge = attr.attr.max_sge;
1828 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1829 &resp, sizeof resp)) {
1834 mutex_lock(&file->mutex);
1835 list_add_tail(&uobj->uobject.list, &file->ucontext->srq_list);
1836 mutex_unlock(&file->mutex);
1838 mutex_unlock(&ib_uverbs_idr_mutex);
/* Error unwind (labels elided): drop IDR entry, then the SRQ itself. */
1843 idr_remove(&ib_uverbs_srq_idr, uobj->uobject.id);
1846 ib_destroy_srq(srq);
1849 mutex_unlock(&ib_uverbs_idr_mutex);
/*
 * Handle the MODIFY_SRQ uverbs command: update the SRQ's max_wr and/or
 * srq_limit according to attr_mask via ib_modify_srq().
 *
 * NOTE(review): elided extract -- the not-found error assignment and
 * goto inside the ownership-check branch are in the numbering gap.
 */
1855 ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
1856 const char __user *buf, int in_len,
1859 struct ib_uverbs_modify_srq cmd;
1861 struct ib_srq_attr attr;
1864 if (copy_from_user(&cmd, buf, sizeof cmd))
1867 mutex_lock(&ib_uverbs_idr_mutex);
/* Handle must exist and belong to this process's ucontext. */
1869 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1870 if (!srq || srq->uobject->context != file->ucontext) {
/* attr_mask selects which of these fields ib_modify_srq applies. */
1875 attr.max_wr = cmd.max_wr;
1876 attr.srq_limit = cmd.srq_limit;
1878 ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
1881 mutex_unlock(&ib_uverbs_idr_mutex);
1883 return ret ? ret : in_len;
/*
 * Handle the QUERY_SRQ uverbs command: query the SRQ's current
 * attributes (max_wr, max_sge, srq_limit) and copy them to userspace.
 * The IDR mutex is dropped before the copy_to_user, which only touches
 * the local attr/resp copies.
 *
 * NOTE(review): elided extract -- the post-query error bail-out between
 * the mutex_unlock and the memset is in the numbering gap.
 */
1886 ssize_t ib_uverbs_query_srq(struct ib_uverbs_file *file,
1887 const char __user *buf,
1888 int in_len, int out_len)
1890 struct ib_uverbs_query_srq cmd;
1891 struct ib_uverbs_query_srq_resp resp;
1892 struct ib_srq_attr attr;
1896 if (out_len < sizeof resp)
1899 if (copy_from_user(&cmd, buf, sizeof cmd))
1902 mutex_lock(&ib_uverbs_idr_mutex);
/* Only query when the handle is valid and owned by this context. */
1904 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1905 if (srq && srq->uobject->context == file->ucontext)
1906 ret = ib_query_srq(srq, &attr);
1910 mutex_unlock(&ib_uverbs_idr_mutex);
/* Zero the response first so ABI padding never leaks kernel stack. */
1915 memset(&resp, 0, sizeof resp);
1917 resp.max_wr = attr.max_wr;
1918 resp.max_sge = attr.max_sge;
1919 resp.srq_limit = attr.srq_limit;
1921 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1922 &resp, sizeof resp))
1926 return ret ? ret : in_len;
/*
 * Handle the DESTROY_SRQ uverbs command: destroy the SRQ, retire its
 * IDR handle and per-context list entry, flush any pending async events
 * for it, and report the number of events already delivered so
 * userspace can account for in-flight ones.
 *
 * NOTE(review): elided extract -- the ib_destroy_srq() error check,
 * kfree(uobj), and the function's closing lines fall outside the
 * visible range.
 */
1929 ssize_t ib_uverbs_destroy_srq(struct ib_uverbs_file *file,
1930 const char __user *buf, int in_len,
1933 struct ib_uverbs_destroy_srq cmd;
1934 struct ib_uverbs_destroy_srq_resp resp;
1936 struct ib_uevent_object *uobj;
1939 if (copy_from_user(&cmd, buf, sizeof cmd))
1942 mutex_lock(&ib_uverbs_idr_mutex);
1944 memset(&resp, 0, sizeof resp);
/* Handle must exist and belong to this process's ucontext. */
1946 srq = idr_find(&ib_uverbs_srq_idr, cmd.srq_handle);
1947 if (!srq || srq->uobject->context != file->ucontext)
1950 uobj = container_of(srq->uobject, struct ib_uevent_object, uobject);
1952 ret = ib_destroy_srq(srq);
/* Only after successful destroy: retire handle and list entry. */
1956 idr_remove(&ib_uverbs_srq_idr, cmd.srq_handle);
1958 mutex_lock(&file->mutex);
1959 list_del(&uobj->uobject.list);
1960 mutex_unlock(&file->mutex);
/* Drop any queued-but-undelivered async events for this SRQ. */
1962 ib_uverbs_release_uevent(file, uobj);
1964 resp.events_reported = uobj->events_reported;
1968 if (copy_to_user((void __user *) (unsigned long) cmd.response,
1969 &resp, sizeof resp))
1973 mutex_unlock(&ib_uverbs_idr_mutex);
1975 return ret ? ret : in_len;