/*
 * Copyright (c) 2006, 2007 Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2007, 2008 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>

#include <rdma/ib_smi.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_addr.h>

#include <linux/mlx4/driver.h>
#include <linux/mlx4/cmd.h>

#include "mlx4_ib.h"
#include "user.h"

#define DRV_NAME	"mlx4_ib"
#define DRV_VERSION	"1.0"
#define DRV_RELDATE	"April 4, 2008"

MODULE_AUTHOR("Roland Dreier");
MODULE_DESCRIPTION("Mellanox ConnectX HCA InfiniBand driver");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(DRV_VERSION);

static const char mlx4_ib_version[] =
        DRV_NAME ": Mellanox ConnectX InfiniBand driver v"
        DRV_VERSION " (" DRV_RELDATE ")\n";

struct update_gid_work {
        struct work_struct work;
        union ib_gid       gids[128];
        struct mlx4_ib_dev *dev;
        int                port;
};

static struct workqueue_struct *wq;

static void init_query_mad(struct ib_smp *mad)
{
        mad->base_version = 1;
        mad->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED;
        mad->class_version = 1;
        mad->method = IB_MGMT_METHOD_GET;
}

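/*
 * Device-wide attribute query.  NodeInfo is fetched from the firmware
 * with a MAD, and the rest of struct ib_device_attr is filled in from
 * the capabilities the mlx4 core cached in dev->dev->caps.
 */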
static int mlx4_ib_query_device(struct ib_device *ibdev,
                                struct ib_device_attr *props)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memset(props, 0, sizeof *props);

        props->fw_ver = dev->dev->caps.fw_ver;
        props->device_cap_flags = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT |
                IB_DEVICE_SYS_IMAGE_GUID |
                IB_DEVICE_RC_RNR_NAK_GEN |
                IB_DEVICE_BLOCK_MULTICAST_LOOPBACK;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BAD_QKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_QKEY_CNTR;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_APM)
                props->device_cap_flags |= IB_DEVICE_AUTO_PATH_MIG;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_UD_AV_PORT)
                props->device_cap_flags |= IB_DEVICE_UD_AV_PORT_ENFORCE;
        if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_IPOIB_CSUM)
                props->device_cap_flags |= IB_DEVICE_UD_IP_CSUM;
        if (dev->dev->caps.max_gso_sz && dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_BLH)
                props->device_cap_flags |= IB_DEVICE_UD_TSO;
        if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_RESERVED_LKEY)
                props->device_cap_flags |= IB_DEVICE_LOCAL_DMA_LKEY;
        if ((dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_LOCAL_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_REMOTE_INV) &&
            (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_FAST_REG_WR))
                props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;

        props->vendor_id = be32_to_cpup((__be32 *) (out_mad->data + 36)) &
                0xffffff;
        props->vendor_part_id = be16_to_cpup((__be16 *) (out_mad->data + 30));
        props->hw_ver = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&props->sys_image_guid, out_mad->data + 4, 8);

        props->max_mr_size = ~0ull;
        props->page_size_cap = dev->dev->caps.page_size_cap;
        props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
        props->max_qp_wr = dev->dev->caps.max_wqes;
        props->max_sge = min(dev->dev->caps.max_sq_sg,
                             dev->dev->caps.max_rq_sg);
        props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
        props->max_cqe = dev->dev->caps.max_cqes;
        props->max_mr = dev->dev->caps.num_mpts - dev->dev->caps.reserved_mrws;
        props->max_pd = dev->dev->caps.num_pds - dev->dev->caps.reserved_pds;
        props->max_qp_rd_atom = dev->dev->caps.max_qp_dest_rdma;
        props->max_qp_init_rd_atom = dev->dev->caps.max_qp_init_rdma;
        props->max_res_rd_atom = props->max_qp_rd_atom * props->max_qp;
        props->max_srq = dev->dev->caps.num_srqs - dev->dev->caps.reserved_srqs;
        props->max_srq_wr = dev->dev->caps.max_srq_wqes - 1;
        props->max_srq_sge = dev->dev->caps.max_srq_sge;
        props->max_fast_reg_page_list_len = PAGE_SIZE / sizeof (u64);
        props->local_ca_ack_delay = dev->dev->caps.local_ca_ack_delay;
        props->atomic_cap = dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_ATOMIC ?
                IB_ATOMIC_HCA : IB_ATOMIC_NONE;
        props->masked_atomic_cap = IB_ATOMIC_HCA;
        props->max_pkeys = dev->dev->caps.pkey_table_len[1];
        props->max_mcast_grp = dev->dev->caps.num_mgms + dev->dev->caps.num_amgms;
        props->max_mcast_qp_attach = dev->dev->caps.num_qp_per_mgm;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                props->max_mcast_grp;
        props->max_map_per_fmr = (1 << (32 - ilog2(dev->dev->caps.num_mpts))) - 1;

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}

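/*
 * Each ConnectX port can run as InfiniBand or as Ethernet (IBoE); the
 * mlx4 core encodes the per-port choice as a bit in caps.port_mask,
 * which is what the IB core queries through this method.
 */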
static enum rdma_link_layer
mlx4_ib_port_link_layer(struct ib_device *device, u8 port_num)
{
        struct mlx4_dev *dev = to_mdev(device)->dev;

        return dev->caps.port_mask & (1 << (port_num - 1)) ?
                IB_LINK_LAYER_INFINIBAND : IB_LINK_LAYER_ETHERNET;
}

static int ib_link_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props,
                              struct ib_smp *out_mad)
{
        props->lid = be16_to_cpup((__be16 *) (out_mad->data + 16));
        props->lmc = out_mad->data[34] & 0x7;
        props->sm_lid = be16_to_cpup((__be16 *) (out_mad->data + 18));
        props->sm_sl = out_mad->data[36] & 0xf;
        props->state = out_mad->data[32] & 0xf;
        props->phys_state = out_mad->data[33] >> 4;
        props->port_cap_flags = be32_to_cpup((__be32 *) (out_mad->data + 20));
        props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len = to_mdev(ibdev)->dev->caps.pkey_table_len[port];
        props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->active_width = out_mad->data[31] & 0xf;
        props->active_speed = out_mad->data[35] >> 4;
        props->max_mtu = out_mad->data[41] & 0xf;
        props->active_mtu = out_mad->data[36] >> 4;
        props->subnet_timeout = out_mad->data[51] & 0x1f;
        props->max_vl_num = out_mad->data[37] >> 4;
        props->init_type_reply = out_mad->data[41] >> 4;

        return 0;
}

static u8 state_to_phys_state(enum ib_port_state state)
{
        return state == IB_PORT_ACTIVE ? 5 : 3;
}

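/*
 * An IBoE port has no subnet management agent, so most attributes are
 * synthesized here: fixed width/speed and a single-entry pkey table,
 * with port state and active MTU derived from the companion Ethernet
 * netdev when one is registered.
 */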
static int eth_link_query_port(struct ib_device *ibdev, u8 port,
                               struct ib_port_attr *props,
                               struct ib_smp *out_mad)
{
        struct mlx4_ib_iboe *iboe = &to_mdev(ibdev)->iboe;
        struct net_device *ndev;
        enum ib_mtu tmp;

        props->active_width = IB_WIDTH_4X;
        props->active_speed = 4;
        props->port_cap_flags = IB_PORT_CM_SUP;
        props->gid_tbl_len = to_mdev(ibdev)->dev->caps.gid_table_len[port];
        props->max_msg_sz = to_mdev(ibdev)->dev->caps.max_msg_sz;
        props->pkey_tbl_len = 1;
        props->bad_pkey_cntr = be16_to_cpup((__be16 *) (out_mad->data + 46));
        props->qkey_viol_cntr = be16_to_cpup((__be16 *) (out_mad->data + 48));
        props->max_mtu = IB_MTU_2048;
        props->subnet_timeout = 0;
        props->max_vl_num = out_mad->data[37] >> 4;
        props->init_type_reply = 0;
        props->state = IB_PORT_DOWN;
        props->phys_state = state_to_phys_state(props->state);
        props->active_mtu = IB_MTU_256;
        spin_lock(&iboe->lock);
        ndev = iboe->netdevs[port - 1];
        if (!ndev)
                goto out;

        tmp = iboe_get_mtu(ndev->mtu);
        props->active_mtu = tmp ? min(props->max_mtu, tmp) : IB_MTU_256;

        props->state = netif_running(ndev) && netif_oper_up(ndev) ?
                IB_PORT_ACTIVE : IB_PORT_DOWN;
        props->phys_state = state_to_phys_state(props->state);

out:
        spin_unlock(&iboe->lock);
        return 0;
}

static int mlx4_ib_query_port(struct ib_device *ibdev, u8 port,
                              struct ib_port_attr *props)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        memset(props, 0, sizeof *props);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        err = mlx4_ib_port_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND ?
                ib_link_query_port(ibdev, port, props, out_mad) :
                eth_link_query_port(ibdev, port, props, out_mad);

out:
        kfree(in_mad);
        kfree(out_mad);

        return err;
}

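/*
 * GID queries are split by link layer: IB ports ask the SMA (PortInfo
 * supplies the GID prefix, GuidInfo the per-index GUID), while IBoE
 * ports are answered from the software gid_table that update_gids_task()
 * keeps in sync with the netdevs.
 */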
static int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                               union ib_gid *gid)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PORT_INFO;
        in_mad->attr_mod = cpu_to_be32(port);

        err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw, out_mad->data + 8, 8);

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_GUID_INFO;
        in_mad->attr_mod = cpu_to_be32(index / 8);

        err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(gid->raw + 8, out_mad->data + (index % 8) * 8, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int iboe_query_gid(struct ib_device *ibdev, u8 port, int index,
                          union ib_gid *gid)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);

        *gid = dev->iboe.gid_table[port - 1][index];

        return 0;
}

static int mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,
                             union ib_gid *gid)
{
        if (rdma_port_get_link_layer(ibdev, port) == IB_LINK_LAYER_INFINIBAND)
                return __mlx4_ib_query_gid(ibdev, port, index, gid);
        else
                return iboe_query_gid(ibdev, port, index, gid);
}

static int mlx4_ib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
                              u16 *pkey)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_PKEY_TABLE;
        in_mad->attr_mod = cpu_to_be32(index / 32);

        err = mlx4_MAD_IFC(to_mdev(ibdev), 1, 1, port, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        *pkey = be16_to_cpu(((__be16 *) out_mad->data)[index % 32]);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static int mlx4_ib_modify_device(struct ib_device *ibdev, int mask,
                                 struct ib_device_modify *props)
{
        if (mask & ~IB_DEVICE_MODIFY_NODE_DESC)
                return -EOPNOTSUPP;

        if (mask & IB_DEVICE_MODIFY_NODE_DESC) {
                spin_lock(&to_mdev(ibdev)->sm_lock);
                memcpy(ibdev->node_desc, props->node_desc, 64);
                spin_unlock(&to_mdev(ibdev)->sm_lock);
        }

        return 0;
}

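/*
 * Helper for mlx4_ib_modify_port(): writes a new port capability mask
 * (optionally resetting the QKey violation counter) via the SET_PORT
 * firmware command.  Older firmware uses a different mailbox layout,
 * hence the MLX4_FLAG_OLD_PORT_CMDS branch.
 */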
static int mlx4_SET_PORT(struct mlx4_ib_dev *dev, u8 port, int reset_qkey_viols,
                         u32 cap_mask)
{
        struct mlx4_cmd_mailbox *mailbox;
        int err;
        u8 is_eth = dev->dev->caps.port_type[port] == MLX4_PORT_TYPE_ETH;

        mailbox = mlx4_alloc_cmd_mailbox(dev->dev);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);

        memset(mailbox->buf, 0, 256);

        if (dev->dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
                *(u8 *) mailbox->buf = !!reset_qkey_viols << 6;
                ((__be32 *) mailbox->buf)[2] = cpu_to_be32(cap_mask);
        } else {
                ((u8 *) mailbox->buf)[3] = !!reset_qkey_viols;
                ((__be32 *) mailbox->buf)[1] = cpu_to_be32(cap_mask);
        }

        err = mlx4_cmd(dev->dev, mailbox->dma, port, is_eth, MLX4_CMD_SET_PORT,
                       MLX4_CMD_TIME_CLASS_B);

        mlx4_free_cmd_mailbox(dev->dev, mailbox);
        return err;
}

static int mlx4_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
                               struct ib_port_modify *props)
{
        struct ib_port_attr attr;
        u32 cap_mask;
        int err;

        mutex_lock(&to_mdev(ibdev)->cap_mask_mutex);

        err = mlx4_ib_query_port(ibdev, port, &attr);
        if (err)
                goto out;

        cap_mask = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;

        err = mlx4_SET_PORT(to_mdev(ibdev), port,
                            !!(mask & IB_PORT_RESET_QKEY_CNTR),
                            cap_mask);

out:
        mutex_unlock(&to_mdev(ibdev)->cap_mask_mutex);
        return err;
}

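/*
 * Per-process context setup: allocates a UAR for the process and
 * returns the QP table size and BlueFlame geometry that the userspace
 * library needs in order to mmap its doorbell pages.
 */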
static struct ib_ucontext *mlx4_ib_alloc_ucontext(struct ib_device *ibdev,
                                                  struct ib_udata *udata)
{
        struct mlx4_ib_dev *dev = to_mdev(ibdev);
        struct mlx4_ib_ucontext *context;
        struct mlx4_ib_alloc_ucontext_resp resp;
        int err;

        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);

        resp.qp_tab_size = dev->dev->caps.num_qps;
        resp.bf_reg_size = dev->dev->caps.bf_reg_size;
        resp.bf_regs_per_page = dev->dev->caps.bf_regs_per_page;

        context = kmalloc(sizeof *context, GFP_KERNEL);
        if (!context)
                return ERR_PTR(-ENOMEM);

        err = mlx4_uar_alloc(to_mdev(ibdev)->dev, &context->uar);
        if (err) {
                kfree(context);
                return ERR_PTR(err);
        }

        INIT_LIST_HEAD(&context->db_page_list);
        mutex_init(&context->db_page_mutex);

        err = ib_copy_to_udata(udata, &resp, sizeof resp);
        if (err) {
                mlx4_uar_free(to_mdev(ibdev)->dev, &context->uar);
                kfree(context);
                return ERR_PTR(-EFAULT);
        }

        return &context->ibucontext;
}

static int mlx4_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
{
        struct mlx4_ib_ucontext *context = to_mucontext(ibcontext);

        mlx4_uar_free(to_mdev(ibcontext->device)->dev, &context->uar);
        kfree(context);

        return 0;
}

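/*
 * The mmap offset encodes what userspace wants mapped: page offset 0
 * is the context's UAR page (mapped non-cached), page offset 1 is the
 * matching BlueFlame page (mapped write-combining) on devices that
 * support BlueFlame.
 */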
static int mlx4_ib_mmap(struct ib_ucontext *context, struct vm_area_struct *vma)
{
        struct mlx4_ib_dev *dev = to_mdev(context->device);

        if (vma->vm_end - vma->vm_start != PAGE_SIZE)
                return -EINVAL;

        if (vma->vm_pgoff == 0) {
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       to_mucontext(context)->uar.pfn,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else if (vma->vm_pgoff == 1 && dev->dev->caps.bf_reg_size != 0) {
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

                if (io_remap_pfn_range(vma, vma->vm_start,
                                       to_mucontext(context)->uar.pfn +
                                       dev->dev->caps.num_uars,
                                       PAGE_SIZE, vma->vm_page_prot))
                        return -EAGAIN;
        } else
                return -EINVAL;

        return 0;
}

static struct ib_pd *mlx4_ib_alloc_pd(struct ib_device *ibdev,
                                      struct ib_ucontext *context,
                                      struct ib_udata *udata)
{
        struct mlx4_ib_pd *pd;
        int err;

        pd = kmalloc(sizeof *pd, GFP_KERNEL);
        if (!pd)
                return ERR_PTR(-ENOMEM);

        err = mlx4_pd_alloc(to_mdev(ibdev)->dev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
        }

        if (context)
                if (ib_copy_to_udata(udata, &pd->pdn, sizeof (__u32))) {
                        mlx4_pd_free(to_mdev(ibdev)->dev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }

        return &pd->ibpd;
}

static int mlx4_ib_dealloc_pd(struct ib_pd *pd)
{
        mlx4_pd_free(to_mdev(pd->device)->dev, to_mpd(pd)->pdn);
        kfree(pd);

        return 0;
}

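/*
 * For IBoE, attaching a QP to a multicast GID also requires the
 * corresponding multicast MAC to be programmed into the Ethernet
 * netdev.  Each attachment is recorded on the QP's gid_list so the
 * MAC can be removed again when the group is detached.
 */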
static int add_gid_entry(struct ib_qp *ibqp, union ib_gid *gid)
{
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_gid_entry *ge;

        ge = kzalloc(sizeof *ge, GFP_KERNEL);
        if (!ge)
                return -ENOMEM;

        ge->gid = *gid;
        if (mlx4_ib_add_mc(mdev, mqp, gid)) {
                ge->port = mqp->port;
                ge->added = 1;
        }

        mutex_lock(&mqp->mutex);
        list_add_tail(&ge->list, &mqp->gid_list);
        mutex_unlock(&mqp->mutex);

        return 0;
}

int mlx4_ib_add_mc(struct mlx4_ib_dev *mdev, struct mlx4_ib_qp *mqp,
                   union ib_gid *gid)
{
        u8 mac[6];
        struct net_device *ndev;
        int ret = 0;

        if (!mqp->port)
                return 0;

        spin_lock(&mdev->iboe.lock);
        ndev = mdev->iboe.netdevs[mqp->port - 1];
        if (ndev)
                dev_hold(ndev);
        spin_unlock(&mdev->iboe.lock);

        if (ndev) {
                rdma_get_mcast_mac((struct in6_addr *) gid, mac);
                rtnl_lock();
                dev_mc_add(mdev->iboe.netdevs[mqp->port - 1], mac);
                ret = 1;
                rtnl_unlock();
                dev_put(ndev);
        }

        return ret;
}

static int mlx4_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);

        err = mlx4_multicast_attach(mdev->dev, &mqp->mqp, gid->raw, !!(mqp->flags &
                                    MLX4_IB_QP_BLOCK_MULTICAST_LOOPBACK));
        if (err)
                return err;

        err = add_gid_entry(ibqp, gid);
        if (err)
                goto err_add;

        return 0;

err_add:
        mlx4_multicast_detach(mdev->dev, &mqp->mqp, gid->raw);
        return err;
}

static struct mlx4_ib_gid_entry *find_gid_entry(struct mlx4_ib_qp *qp, u8 *raw)
{
        struct mlx4_ib_gid_entry *ge;
        struct mlx4_ib_gid_entry *tmp;
        struct mlx4_ib_gid_entry *ret = NULL;

        list_for_each_entry_safe(ge, tmp, &qp->gid_list, list) {
                if (!memcmp(raw, ge->gid.raw, 16)) {
                        ret = ge;
                        break;
                }
        }

        return ret;
}

static int mlx4_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
{
        int err;
        struct mlx4_ib_dev *mdev = to_mdev(ibqp->device);
        struct mlx4_ib_qp *mqp = to_mqp(ibqp);
        u8 mac[6];
        struct net_device *ndev;
        struct mlx4_ib_gid_entry *ge;

        err = mlx4_multicast_detach(mdev->dev,
                                    &mqp->mqp, gid->raw);
        if (err)
                return err;

        mutex_lock(&mqp->mutex);
        ge = find_gid_entry(mqp, gid->raw);
        if (ge) {
                spin_lock(&mdev->iboe.lock);
                ndev = ge->added ? mdev->iboe.netdevs[ge->port - 1] : NULL;
                if (ndev)
                        dev_hold(ndev);
                spin_unlock(&mdev->iboe.lock);
                rdma_get_mcast_mac((struct in6_addr *) gid, mac);
                if (ndev) {
                        rtnl_lock();
                        dev_mc_del(mdev->iboe.netdevs[ge->port - 1], mac);
                        rtnl_unlock();
                        dev_put(ndev);
                }
                list_del(&ge->list);
                kfree(ge);
        } else
                printk(KERN_WARNING "could not find mgid entry\n");

        mutex_unlock(&mqp->mutex);

        return 0;
}

static int init_node_data(struct mlx4_ib_dev *dev)
{
        struct ib_smp *in_mad = NULL;
        struct ib_smp *out_mad = NULL;
        int err = -ENOMEM;

        in_mad = kzalloc(sizeof *in_mad, GFP_KERNEL);
        out_mad = kmalloc(sizeof *out_mad, GFP_KERNEL);
        if (!in_mad || !out_mad)
                goto out;

        init_query_mad(in_mad);
        in_mad->attr_id = IB_SMP_ATTR_NODE_DESC;

        err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        memcpy(dev->ib_dev.node_desc, out_mad->data, 64);

        in_mad->attr_id = IB_SMP_ATTR_NODE_INFO;

        err = mlx4_MAD_IFC(dev, 1, 1, 1, NULL, NULL, in_mad, out_mad);
        if (err)
                goto out;

        dev->dev->rev_id = be32_to_cpup((__be32 *) (out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);

out:
        kfree(in_mad);
        kfree(out_mad);
        return err;
}

static ssize_t show_hca(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "MT%d\n", dev->dev->pdev->device);
}

static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
                           char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%d.%d.%d\n", (int) (dev->dev->caps.fw_ver >> 32),
                       (int) (dev->dev->caps.fw_ver >> 16) & 0xffff,
                       (int) dev->dev->caps.fw_ver & 0xffff);
}

static ssize_t show_rev(struct device *device, struct device_attribute *attr,
                        char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%x\n", dev->dev->rev_id);
}

static ssize_t show_board(struct device *device, struct device_attribute *attr,
                          char *buf)
{
        struct mlx4_ib_dev *dev =
                container_of(device, struct mlx4_ib_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MLX4_BOARD_ID_LEN,
                       dev->dev->board_id);
}

static DEVICE_ATTR(hw_rev, S_IRUGO, show_rev, NULL);
static DEVICE_ATTR(fw_ver, S_IRUGO, show_fw_ver, NULL);
static DEVICE_ATTR(hca_type, S_IRUGO, show_hca, NULL);
static DEVICE_ATTR(board_id, S_IRUGO, show_board, NULL);

static struct device_attribute *mlx4_class_attributes[] = {
        &dev_attr_hw_rev,
        &dev_attr_fw_ver,
        &dev_attr_hca_type,
        &dev_attr_board_id
};

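/*
 * Derive the modified EUI-64 interface identifier used in a port's
 * link-local GID from the netdev's 48-bit MAC address: insert 0xFFFE
 * in the middle and flip the universal/local bit, as IPv6 stateless
 * autoconfiguration does.
 */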
static void mlx4_addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
{
        memcpy(eui, dev->dev_addr, 3);
        memcpy(eui + 5, dev->dev_addr + 3, 3);
        eui[3] = 0xFF;
        eui[4] = 0xFE;
        eui[0] ^= 2;
}

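/*
 * Deferred GID table update, run on the mlx4_ib workqueue: the new
 * table is pushed to the firmware with SET_PORT and, on success,
 * mirrored into the software gid_table that iboe_query_gid() reads;
 * an IB_EVENT_LID_CHANGE event is then dispatched so consumers notice
 * the change.
 */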
static void update_gids_task(struct work_struct *work)
{
        struct update_gid_work *gw = container_of(work, struct update_gid_work, work);
        struct mlx4_cmd_mailbox *mailbox;
        union ib_gid *gids;
        int err;
        struct mlx4_dev *dev = gw->dev->dev;
        struct ib_event event;

        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox)) {
                printk(KERN_WARNING "update gid table failed %ld\n", PTR_ERR(mailbox));
                return;
        }

        gids = mailbox->buf;
        memcpy(gids, gw->gids, sizeof gw->gids);

        err = mlx4_cmd(dev, mailbox->dma, MLX4_SET_PORT_GID_TABLE << 8 | gw->port,
                       1, MLX4_CMD_SET_PORT, MLX4_CMD_TIME_CLASS_B);
        if (err)
                printk(KERN_WARNING "set port command failed\n");
        else {
                memcpy(gw->dev->iboe.gid_table[gw->port - 1], gw->gids, sizeof gw->gids);
                event.device = &gw->dev->ib_dev;
                event.element.port_num = gw->port;
                event.event = IB_EVENT_LID_CHANGE;
                ib_dispatch_event(&event);
        }

        mlx4_free_cmd_mailbox(dev, mailbox);
        kfree(gw);
}

static int update_ipv6_gids(struct mlx4_ib_dev *dev, int port, int clear)
{
        struct net_device *ndev = dev->iboe.netdevs[port - 1];
        struct update_gid_work *work;

        work = kzalloc(sizeof *work, GFP_ATOMIC);
        if (!work)
                return -ENOMEM;

        if (!clear) {
                mlx4_addrconf_ifid_eui48(&work->gids[0].raw[8], ndev);
                work->gids[0].global.subnet_prefix = cpu_to_be64(0xfe80000000000000LL);
        }

        INIT_WORK(&work->work, update_gids_task);
        work->port = port;
        work->dev = dev;
        queue_work(wq, &work->work);

        return 0;
}

static void handle_en_event(struct mlx4_ib_dev *dev, int port, unsigned long event)
{
        switch (event) {
        case NETDEV_UP:
                update_ipv6_gids(dev, port, 0);
                break;

        case NETDEV_DOWN:
                update_ipv6_gids(dev, port, 1);
                dev->iboe.netdevs[port - 1] = NULL;
        }
}

static void netdev_added(struct mlx4_ib_dev *dev, int port)
{
        update_ipv6_gids(dev, port, 0);
}

static void netdev_removed(struct mlx4_ib_dev *dev, int port)
{
        update_ipv6_gids(dev, port, 1);
}

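/*
 * Netdevice notifier callback: re-resolve which mlx4_en net_device
 * backs each IBoE port, regenerate the GIDs of ports whose netdev
 * appeared or disappeared, and track UP/DOWN transitions of the
 * netdevs we already know about.
 */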
static int mlx4_ib_netdev_event(struct notifier_block *this, unsigned long event,
                                void *ptr)
{
        struct net_device *dev = ptr;
        struct mlx4_ib_dev *ibdev;
        struct net_device *oldnd;
        struct mlx4_ib_iboe *iboe;
        int port;

        if (!net_eq(dev_net(dev), &init_net))
                return NOTIFY_DONE;

        ibdev = container_of(this, struct mlx4_ib_dev, iboe.nb);
        iboe = &ibdev->iboe;

        spin_lock(&iboe->lock);
        mlx4_foreach_ib_transport_port(port, ibdev->dev) {
                oldnd = iboe->netdevs[port - 1];
                iboe->netdevs[port - 1] =
                        mlx4_get_protocol_dev(ibdev->dev, MLX4_PROTOCOL_EN, port);
                if (oldnd != iboe->netdevs[port - 1]) {
                        if (iboe->netdevs[port - 1])
                                netdev_added(ibdev, port);
                        else
                                netdev_removed(ibdev, port);
                }
        }

        if (dev == iboe->netdevs[0])
                handle_en_event(ibdev, 1, event);
        else if (dev == iboe->netdevs[1])
                handle_en_event(ibdev, 2, event);

        spin_unlock(&iboe->lock);

        return NOTIFY_DONE;
}

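/*
 * Device-add callback from the mlx4 core: allocates the driver's
 * private PD and UAR, fills in the ib_device verb table, registers
 * with the IB core and the MAD layer, and installs the netdevice
 * notifier when the device supports IBoE.
 */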
static void *mlx4_ib_add(struct mlx4_dev *dev)
{
        struct mlx4_ib_dev *ibdev;
        int num_ports = 0;
        int i;
        int err;
        struct mlx4_ib_iboe *iboe;

        printk_once(KERN_INFO "%s", mlx4_ib_version);

        mlx4_foreach_ib_transport_port(i, dev)
                num_ports++;

        /* No point in registering a device with no ports... */
        if (num_ports == 0)
                return NULL;

        ibdev = (struct mlx4_ib_dev *) ib_alloc_device(sizeof *ibdev);
        if (!ibdev) {
                dev_err(&dev->pdev->dev, "Device struct alloc failed\n");
                return NULL;
        }

        iboe = &ibdev->iboe;

        if (mlx4_pd_alloc(dev, &ibdev->priv_pdn))
                goto err_dealloc;

        if (mlx4_uar_alloc(dev, &ibdev->priv_uar))
                goto err_pd;

        ibdev->uar_map = ioremap(ibdev->priv_uar.pfn << PAGE_SHIFT, PAGE_SIZE);
        if (!ibdev->uar_map)
                goto err_uar;
        MLX4_INIT_DOORBELL_LOCK(&ibdev->uar_lock);

        ibdev->dev = dev;

        strlcpy(ibdev->ib_dev.name, "mlx4_%d", IB_DEVICE_NAME_MAX);
        ibdev->ib_dev.owner = THIS_MODULE;
        ibdev->ib_dev.node_type = RDMA_NODE_IB_CA;
        ibdev->ib_dev.local_dma_lkey = dev->caps.reserved_lkey;
        ibdev->num_ports = num_ports;
        ibdev->ib_dev.phys_port_cnt = ibdev->num_ports;
        ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
        ibdev->ib_dev.dma_device = &dev->pdev->dev;

        ibdev->ib_dev.uverbs_abi_ver = MLX4_IB_UVERBS_ABI_VERSION;
        ibdev->ib_dev.uverbs_cmd_mask =
                (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) |
                (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) |
                (1ull << IB_USER_VERBS_CMD_QUERY_PORT) |
                (1ull << IB_USER_VERBS_CMD_ALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) |
                (1ull << IB_USER_VERBS_CMD_REG_MR) |
                (1ull << IB_USER_VERBS_CMD_DEREG_MR) |
                (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) |
                (1ull << IB_USER_VERBS_CMD_CREATE_CQ) |
                (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) |
                (1ull << IB_USER_VERBS_CMD_CREATE_QP) |
                (1ull << IB_USER_VERBS_CMD_MODIFY_QP) |
                (1ull << IB_USER_VERBS_CMD_QUERY_QP) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_QP) |
                (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) |
                (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) |
                (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) |
                (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) |
                (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) |
                (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ);

        ibdev->ib_dev.query_device = mlx4_ib_query_device;
        ibdev->ib_dev.query_port = mlx4_ib_query_port;
        ibdev->ib_dev.get_link_layer = mlx4_ib_port_link_layer;
        ibdev->ib_dev.query_gid = mlx4_ib_query_gid;
        ibdev->ib_dev.query_pkey = mlx4_ib_query_pkey;
        ibdev->ib_dev.modify_device = mlx4_ib_modify_device;
        ibdev->ib_dev.modify_port = mlx4_ib_modify_port;
        ibdev->ib_dev.alloc_ucontext = mlx4_ib_alloc_ucontext;
        ibdev->ib_dev.dealloc_ucontext = mlx4_ib_dealloc_ucontext;
        ibdev->ib_dev.mmap = mlx4_ib_mmap;
        ibdev->ib_dev.alloc_pd = mlx4_ib_alloc_pd;
        ibdev->ib_dev.dealloc_pd = mlx4_ib_dealloc_pd;
        ibdev->ib_dev.create_ah = mlx4_ib_create_ah;
        ibdev->ib_dev.query_ah = mlx4_ib_query_ah;
        ibdev->ib_dev.destroy_ah = mlx4_ib_destroy_ah;
        ibdev->ib_dev.create_srq = mlx4_ib_create_srq;
        ibdev->ib_dev.modify_srq = mlx4_ib_modify_srq;
        ibdev->ib_dev.query_srq = mlx4_ib_query_srq;
        ibdev->ib_dev.destroy_srq = mlx4_ib_destroy_srq;
        ibdev->ib_dev.post_srq_recv = mlx4_ib_post_srq_recv;
        ibdev->ib_dev.create_qp = mlx4_ib_create_qp;
        ibdev->ib_dev.modify_qp = mlx4_ib_modify_qp;
        ibdev->ib_dev.query_qp = mlx4_ib_query_qp;
        ibdev->ib_dev.destroy_qp = mlx4_ib_destroy_qp;
        ibdev->ib_dev.post_send = mlx4_ib_post_send;
        ibdev->ib_dev.post_recv = mlx4_ib_post_recv;
        ibdev->ib_dev.create_cq = mlx4_ib_create_cq;
        ibdev->ib_dev.modify_cq = mlx4_ib_modify_cq;
        ibdev->ib_dev.resize_cq = mlx4_ib_resize_cq;
        ibdev->ib_dev.destroy_cq = mlx4_ib_destroy_cq;
        ibdev->ib_dev.poll_cq = mlx4_ib_poll_cq;
        ibdev->ib_dev.req_notify_cq = mlx4_ib_arm_cq;
        ibdev->ib_dev.get_dma_mr = mlx4_ib_get_dma_mr;
        ibdev->ib_dev.reg_user_mr = mlx4_ib_reg_user_mr;
        ibdev->ib_dev.dereg_mr = mlx4_ib_dereg_mr;
        ibdev->ib_dev.alloc_fast_reg_mr = mlx4_ib_alloc_fast_reg_mr;
        ibdev->ib_dev.alloc_fast_reg_page_list = mlx4_ib_alloc_fast_reg_page_list;
        ibdev->ib_dev.free_fast_reg_page_list = mlx4_ib_free_fast_reg_page_list;
        ibdev->ib_dev.attach_mcast = mlx4_ib_mcg_attach;
        ibdev->ib_dev.detach_mcast = mlx4_ib_mcg_detach;
        ibdev->ib_dev.process_mad = mlx4_ib_process_mad;

        ibdev->ib_dev.alloc_fmr = mlx4_ib_fmr_alloc;
        ibdev->ib_dev.map_phys_fmr = mlx4_ib_map_phys_fmr;
        ibdev->ib_dev.unmap_fmr = mlx4_ib_unmap_fmr;
        ibdev->ib_dev.dealloc_fmr = mlx4_ib_fmr_dealloc;

        spin_lock_init(&iboe->lock);

        if (init_node_data(ibdev))
                goto err_map;

        spin_lock_init(&ibdev->sm_lock);
        mutex_init(&ibdev->cap_mask_mutex);

        if (ib_register_device(&ibdev->ib_dev, NULL))
                goto err_map;

        if (mlx4_ib_mad_init(ibdev))
                goto err_reg;

        if (dev->caps.flags & MLX4_DEV_CAP_FLAG_IBOE && !iboe->nb.notifier_call) {
                iboe->nb.notifier_call = mlx4_ib_netdev_event;
                err = register_netdevice_notifier(&iboe->nb);
                if (err)
                        goto err_reg;
        }

        for (i = 0; i < ARRAY_SIZE(mlx4_class_attributes); ++i) {
                if (device_create_file(&ibdev->ib_dev.dev,
                                       mlx4_class_attributes[i]))
                        goto err_notif;
        }

        ibdev->ib_active = true;

        return ibdev;

err_notif:
        if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                printk(KERN_WARNING "failure unregistering notifier\n");
        flush_workqueue(wq);

err_reg:
        ib_unregister_device(&ibdev->ib_dev);

err_map:
        iounmap(ibdev->uar_map);

err_uar:
        mlx4_uar_free(dev, &ibdev->priv_uar);

err_pd:
        mlx4_pd_free(dev, ibdev->priv_pdn);

err_dealloc:
        ib_dealloc_device(&ibdev->ib_dev);

        return NULL;
}

static void mlx4_ib_remove(struct mlx4_dev *dev, void *ibdev_ptr)
{
        struct mlx4_ib_dev *ibdev = ibdev_ptr;
        int p;

        mlx4_ib_mad_cleanup(ibdev);
        ib_unregister_device(&ibdev->ib_dev);
        if (ibdev->iboe.nb.notifier_call) {
                if (unregister_netdevice_notifier(&ibdev->iboe.nb))
                        printk(KERN_WARNING "failure unregistering notifier\n");
                ibdev->iboe.nb.notifier_call = NULL;
        }
        iounmap(ibdev->uar_map);

        mlx4_foreach_port(p, dev, MLX4_PORT_TYPE_IB)
                mlx4_CLOSE_PORT(dev, p);

        mlx4_uar_free(dev, &ibdev->priv_uar);
        mlx4_pd_free(dev, ibdev->priv_pdn);
        ib_dealloc_device(&ibdev->ib_dev);
}

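/*
 * Translate mlx4 core events (port up/down, catastrophic error) into
 * IB events.  A catastrophic error also clears ib_active, making
 * mlx4_ib_alloc_ucontext() refuse new user contexts.
 */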
static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr,
                          enum mlx4_dev_event event, int port)
{
        struct ib_event ibev;
        struct mlx4_ib_dev *ibdev = to_mdev((struct ib_device *) ibdev_ptr);

        if (port > ibdev->num_ports)
                return;

        switch (event) {
        case MLX4_DEV_EVENT_PORT_UP:
                ibev.event = IB_EVENT_PORT_ACTIVE;
                break;

        case MLX4_DEV_EVENT_PORT_DOWN:
                ibev.event = IB_EVENT_PORT_ERR;
                break;

        case MLX4_DEV_EVENT_CATASTROPHIC_ERROR:
                ibdev->ib_active = false;
                ibev.event = IB_EVENT_DEVICE_FATAL;
                break;

        default:
                return;
        }

        ibev.device = ibdev_ptr;
        ibev.element.port_num = port;

        ib_dispatch_event(&ibev);
}

static struct mlx4_interface mlx4_ib_interface = {
        .add = mlx4_ib_add,
        .remove = mlx4_ib_remove,
        .event = mlx4_ib_event,
        .protocol = MLX4_PROTOCOL_IB
};

static int __init mlx4_ib_init(void)
{
        int err;

        wq = create_singlethread_workqueue("mlx4_ib");
        if (!wq)
                return -ENOMEM;

        err = mlx4_register_interface(&mlx4_ib_interface);
        if (err) {
                destroy_workqueue(wq);
                return err;
        }

        return 0;
}

static void __exit mlx4_ib_cleanup(void)
{
        mlx4_unregister_interface(&mlx4_ib_interface);
        destroy_workqueue(wq);
}

module_init(mlx4_ib_init);
module_exit(mlx4_ib_cleanup);