]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2004 Topspin Communications. All rights reserved. | |
2a1d9b7f RD |
3 | * Copyright (c) 2005 Intel Corporation. All rights reserved. |
4 | * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. | |
5 | * Copyright (c) 2005 Voltaire, Inc. All rights reserved. | |
1da177e4 LT |
6 | * |
7 | * This software is available to you under a choice of one of two | |
8 | * licenses. You may choose to be licensed under the terms of the GNU | |
9 | * General Public License (GPL) Version 2, available from the file | |
10 | * COPYING in the main directory of this source tree, or the | |
11 | * OpenIB.org BSD license below: | |
12 | * | |
13 | * Redistribution and use in source and binary forms, with or | |
14 | * without modification, are permitted provided that the following | |
15 | * conditions are met: | |
16 | * | |
17 | * - Redistributions of source code must retain the above | |
18 | * copyright notice, this list of conditions and the following | |
19 | * disclaimer. | |
20 | * | |
21 | * - Redistributions in binary form must reproduce the above | |
22 | * copyright notice, this list of conditions and the following | |
23 | * disclaimer in the documentation and/or other materials | |
24 | * provided with the distribution. | |
25 | * | |
26 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | |
27 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | |
28 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | |
29 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | |
30 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | |
31 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | |
32 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | |
33 | * SOFTWARE. | |
1da177e4 LT |
34 | */ |
35 | ||
1da177e4 LT |
36 | #include <linux/module.h> |
37 | #include <linux/errno.h> | |
38 | #include <linux/slab.h> | |
e8edc6e0 | 39 | #include <linux/workqueue.h> |
1da177e4 | 40 | |
a4d61e84 | 41 | #include <rdma/ib_cache.h> |
1da177e4 LT |
42 | |
43 | #include "core_priv.h" | |
44 | ||
45 | struct ib_pkey_cache { | |
46 | int table_len; | |
47 | u16 table[0]; | |
48 | }; | |
49 | ||
50 | struct ib_gid_cache { | |
51 | int table_len; | |
52 | union ib_gid table[0]; | |
53 | }; | |
54 | ||
/*
 * Deferred cache refresh request: one item is allocated per qualifying
 * async event (see ib_cache_event()) and freed by ib_cache_task().
 */
struct ib_update_work {
	struct work_struct work;	/* queued via schedule_work() */
	struct ib_device *device;	/* device whose cache to refresh */
	u8                port_num;	/* port that generated the event */
};
60 | ||
61 | static inline int start_port(struct ib_device *device) | |
62 | { | |
07ebafba | 63 | return (device->node_type == RDMA_NODE_IB_SWITCH) ? 0 : 1; |
1da177e4 LT |
64 | } |
65 | ||
66 | static inline int end_port(struct ib_device *device) | |
67 | { | |
07ebafba TT |
68 | return (device->node_type == RDMA_NODE_IB_SWITCH) ? |
69 | 0 : device->phys_port_cnt; | |
1da177e4 LT |
70 | } |
71 | ||
72 | int ib_get_cached_gid(struct ib_device *device, | |
73 | u8 port_num, | |
74 | int index, | |
75 | union ib_gid *gid) | |
76 | { | |
77 | struct ib_gid_cache *cache; | |
78 | unsigned long flags; | |
79 | int ret = 0; | |
80 | ||
81 | if (port_num < start_port(device) || port_num > end_port(device)) | |
82 | return -EINVAL; | |
83 | ||
84 | read_lock_irqsave(&device->cache.lock, flags); | |
85 | ||
86 | cache = device->cache.gid_cache[port_num - start_port(device)]; | |
87 | ||
88 | if (index < 0 || index >= cache->table_len) | |
89 | ret = -EINVAL; | |
90 | else | |
91 | *gid = cache->table[index]; | |
92 | ||
93 | read_unlock_irqrestore(&device->cache.lock, flags); | |
94 | ||
95 | return ret; | |
96 | } | |
97 | EXPORT_SYMBOL(ib_get_cached_gid); | |
98 | ||
99 | int ib_find_cached_gid(struct ib_device *device, | |
100 | union ib_gid *gid, | |
101 | u8 *port_num, | |
102 | u16 *index) | |
103 | { | |
104 | struct ib_gid_cache *cache; | |
105 | unsigned long flags; | |
106 | int p, i; | |
107 | int ret = -ENOENT; | |
108 | ||
109 | *port_num = -1; | |
110 | if (index) | |
111 | *index = -1; | |
112 | ||
113 | read_lock_irqsave(&device->cache.lock, flags); | |
114 | ||
115 | for (p = 0; p <= end_port(device) - start_port(device); ++p) { | |
116 | cache = device->cache.gid_cache[p]; | |
117 | for (i = 0; i < cache->table_len; ++i) { | |
118 | if (!memcmp(gid, &cache->table[i], sizeof *gid)) { | |
119 | *port_num = p + start_port(device); | |
120 | if (index) | |
121 | *index = i; | |
122 | ret = 0; | |
123 | goto found; | |
124 | } | |
125 | } | |
126 | } | |
127 | found: | |
128 | read_unlock_irqrestore(&device->cache.lock, flags); | |
129 | ||
130 | return ret; | |
131 | } | |
132 | EXPORT_SYMBOL(ib_find_cached_gid); | |
133 | ||
134 | int ib_get_cached_pkey(struct ib_device *device, | |
135 | u8 port_num, | |
136 | int index, | |
137 | u16 *pkey) | |
138 | { | |
139 | struct ib_pkey_cache *cache; | |
140 | unsigned long flags; | |
141 | int ret = 0; | |
142 | ||
143 | if (port_num < start_port(device) || port_num > end_port(device)) | |
144 | return -EINVAL; | |
145 | ||
146 | read_lock_irqsave(&device->cache.lock, flags); | |
147 | ||
148 | cache = device->cache.pkey_cache[port_num - start_port(device)]; | |
149 | ||
150 | if (index < 0 || index >= cache->table_len) | |
151 | ret = -EINVAL; | |
152 | else | |
153 | *pkey = cache->table[index]; | |
154 | ||
155 | read_unlock_irqrestore(&device->cache.lock, flags); | |
156 | ||
157 | return ret; | |
158 | } | |
159 | EXPORT_SYMBOL(ib_get_cached_pkey); | |
160 | ||
161 | int ib_find_cached_pkey(struct ib_device *device, | |
162 | u8 port_num, | |
163 | u16 pkey, | |
164 | u16 *index) | |
165 | { | |
166 | struct ib_pkey_cache *cache; | |
167 | unsigned long flags; | |
168 | int i; | |
169 | int ret = -ENOENT; | |
170 | ||
171 | if (port_num < start_port(device) || port_num > end_port(device)) | |
172 | return -EINVAL; | |
173 | ||
174 | read_lock_irqsave(&device->cache.lock, flags); | |
175 | ||
176 | cache = device->cache.pkey_cache[port_num - start_port(device)]; | |
177 | ||
178 | *index = -1; | |
179 | ||
180 | for (i = 0; i < cache->table_len; ++i) | |
181 | if ((cache->table[i] & 0x7fff) == (pkey & 0x7fff)) { | |
182 | *index = i; | |
183 | ret = 0; | |
184 | break; | |
185 | } | |
186 | ||
187 | read_unlock_irqrestore(&device->cache.lock, flags); | |
188 | ||
189 | return ret; | |
190 | } | |
191 | EXPORT_SYMBOL(ib_find_cached_pkey); | |
192 | ||
6fb9cdbf JM |
193 | int ib_get_cached_lmc(struct ib_device *device, |
194 | u8 port_num, | |
195 | u8 *lmc) | |
196 | { | |
197 | unsigned long flags; | |
198 | int ret = 0; | |
199 | ||
200 | if (port_num < start_port(device) || port_num > end_port(device)) | |
201 | return -EINVAL; | |
202 | ||
203 | read_lock_irqsave(&device->cache.lock, flags); | |
204 | *lmc = device->cache.lmc_cache[port_num - start_port(device)]; | |
205 | read_unlock_irqrestore(&device->cache.lock, flags); | |
206 | ||
207 | return ret; | |
208 | } | |
209 | EXPORT_SYMBOL(ib_get_cached_lmc); | |
210 | ||
1da177e4 LT |
/*
 * Re-query one port's attributes, P_Key table, and GID table from the
 * device and swap the freshly built tables into the cache.
 *
 * All device queries and allocations happen OUTSIDE the cache lock;
 * only the pointer swap is done under write_lock_irq().  On any
 * failure the existing cached tables are left untouched and all
 * partially built state is freed.  Errors are logged but not
 * propagated (callers run from a workqueue and device setup).
 */
static void ib_cache_update(struct ib_device *device,
			    u8 port)
{
	struct ib_port_attr *tprops = NULL;
	struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache;
	struct ib_gid_cache *gid_cache = NULL, *old_gid_cache;
	int i;
	int ret;

	tprops = kmalloc(sizeof *tprops, GFP_KERNEL);
	if (!tprops)
		return;

	ret = ib_query_port(device, port, tprops);
	if (ret) {
		printk(KERN_WARNING "ib_query_port failed (%d) for %s\n",
		       ret, device->name);
		goto err;
	}

	/* Table sizes come from the port attributes just queried. */
	pkey_cache = kmalloc(sizeof *pkey_cache + tprops->pkey_tbl_len *
			     sizeof *pkey_cache->table, GFP_KERNEL);
	if (!pkey_cache)
		goto err;

	pkey_cache->table_len = tprops->pkey_tbl_len;

	gid_cache = kmalloc(sizeof *gid_cache + tprops->gid_tbl_len *
			    sizeof *gid_cache->table, GFP_KERNEL);
	if (!gid_cache)
		goto err;

	gid_cache->table_len = tprops->gid_tbl_len;

	for (i = 0; i < pkey_cache->table_len; ++i) {
		ret = ib_query_pkey(device, port, i, pkey_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_pkey failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	for (i = 0; i < gid_cache->table_len; ++i) {
		ret = ib_query_gid(device, port, i, gid_cache->table + i);
		if (ret) {
			printk(KERN_WARNING "ib_query_gid failed (%d) for %s (index %d)\n",
			       ret, device->name, i);
			goto err;
		}
	}

	/* Publish the new tables atomically w.r.t. the cache readers. */
	write_lock_irq(&device->cache.lock);

	old_pkey_cache = device->cache.pkey_cache[port - start_port(device)];
	old_gid_cache  = device->cache.gid_cache [port - start_port(device)];

	device->cache.pkey_cache[port - start_port(device)] = pkey_cache;
	device->cache.gid_cache [port - start_port(device)] = gid_cache;

	device->cache.lmc_cache[port - start_port(device)] = tprops->lmc;

	write_unlock_irq(&device->cache.lock);

	/* Old tables are unreachable once the lock is dropped; free them. */
	kfree(old_pkey_cache);
	kfree(old_gid_cache);
	kfree(tprops);
	return;

err:
	/* kfree(NULL) is a no-op, so partial construction is safe here. */
	kfree(pkey_cache);
	kfree(gid_cache);
	kfree(tprops);
}
285 | ||
c4028958 | 286 | static void ib_cache_task(struct work_struct *_work) |
1da177e4 | 287 | { |
c4028958 DH |
288 | struct ib_update_work *work = |
289 | container_of(_work, struct ib_update_work, work); | |
1da177e4 LT |
290 | |
291 | ib_cache_update(work->device, work->port_num); | |
292 | kfree(work); | |
293 | } | |
294 | ||
295 | static void ib_cache_event(struct ib_event_handler *handler, | |
296 | struct ib_event *event) | |
297 | { | |
298 | struct ib_update_work *work; | |
299 | ||
300 | if (event->event == IB_EVENT_PORT_ERR || | |
301 | event->event == IB_EVENT_PORT_ACTIVE || | |
302 | event->event == IB_EVENT_LID_CHANGE || | |
303 | event->event == IB_EVENT_PKEY_CHANGE || | |
acaea9ee JM |
304 | event->event == IB_EVENT_SM_CHANGE || |
305 | event->event == IB_EVENT_CLIENT_REREGISTER) { | |
1da177e4 LT |
306 | work = kmalloc(sizeof *work, GFP_ATOMIC); |
307 | if (work) { | |
c4028958 | 308 | INIT_WORK(&work->work, ib_cache_task); |
1da177e4 LT |
309 | work->device = event->device; |
310 | work->port_num = event->element.port_num; | |
311 | schedule_work(&work->work); | |
312 | } | |
313 | } | |
314 | } | |
315 | ||
/*
 * Per-device initialization (ib_client .add callback): allocate the
 * per-port cache pointer arrays and LMC array, populate every port's
 * cache, then register the async event handler that keeps the cache
 * current.  On failure everything allocated here is freed and the
 * device is left without a cache (void return — errors only logged).
 */
static void ib_cache_setup_one(struct ib_device *device)
{
	int p;

	rwlock_init(&device->cache.lock);

	/* One slot per port: ports start_port()..end_port() inclusive. */
	device->cache.pkey_cache =
		kmalloc(sizeof *device->cache.pkey_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);
	device->cache.gid_cache =
		kmalloc(sizeof *device->cache.gid_cache *
			(end_port(device) - start_port(device) + 1), GFP_KERNEL);

	device->cache.lmc_cache = kmalloc(sizeof *device->cache.lmc_cache *
					  (end_port(device) -
					   start_port(device) + 1),
					  GFP_KERNEL);

	if (!device->cache.pkey_cache || !device->cache.gid_cache ||
	    !device->cache.lmc_cache) {
		printk(KERN_WARNING "Couldn't allocate cache "
		       "for %s\n", device->name);
		goto err;
	}

	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		/* NULL first so a failed update leaves a kfree-safe slot */
		device->cache.pkey_cache[p] = NULL;
		device->cache.gid_cache [p] = NULL;
		ib_cache_update(device, p + start_port(device));
	}

	INIT_IB_EVENT_HANDLER(&device->cache.event_handler,
			      device, ib_cache_event);
	if (ib_register_event_handler(&device->cache.event_handler))
		goto err_cache;

	return;

err_cache:
	/* Per-port tables were (possibly) filled in; free them first. */
	for (p = 0; p <= end_port(device) - start_port(device); ++p) {
		kfree(device->cache.pkey_cache[p]);
		kfree(device->cache.gid_cache[p]);
	}

err:
	/* kfree(NULL) is safe, so partial allocation falls through here. */
	kfree(device->cache.pkey_cache);
	kfree(device->cache.gid_cache);
	kfree(device->cache.lmc_cache);
}
365 | ||
366 | static void ib_cache_cleanup_one(struct ib_device *device) | |
367 | { | |
368 | int p; | |
369 | ||
370 | ib_unregister_event_handler(&device->cache.event_handler); | |
371 | flush_scheduled_work(); | |
372 | ||
373 | for (p = 0; p <= end_port(device) - start_port(device); ++p) { | |
374 | kfree(device->cache.pkey_cache[p]); | |
375 | kfree(device->cache.gid_cache[p]); | |
376 | } | |
377 | ||
378 | kfree(device->cache.pkey_cache); | |
379 | kfree(device->cache.gid_cache); | |
6fb9cdbf | 380 | kfree(device->cache.lmc_cache); |
1da177e4 LT |
381 | } |
382 | ||
/*
 * IB client registration: the core invokes .add/.remove for every
 * device so each gets its cache set up and torn down automatically.
 */
static struct ib_client cache_client = {
	.name   = "cache",
	.add    = ib_cache_setup_one,
	.remove = ib_cache_cleanup_one
};
388 | ||
/* Module init hook: register the cache client with the IB core. */
int __init ib_cache_setup(void)
{
	return ib_register_client(&cache_client);
}
393 | ||
/* Module exit hook: unregister the client (triggers per-device cleanup). */
void __exit ib_cache_cleanup(void)
{
	ib_unregister_client(&cache_client);
}