/*
 * Copyright (c) 2006 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 */
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/rculist.h>

#include "rds.h"
#include "ib.h"
#include "xlist.h"

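/*
 * Per-CPU flag used as a lightweight grace period for the clean_list:
 * rds_ib_reuse_fmr() sets CLEAN_LIST_BUSY_BIT while it pulls an entry off
 * the clean xlist, and wait_clean_list_grace() spins until no CPU has the
 * bit set before MRs are put back onto the clean list (see
 * rds_ib_flush_mr_pool()).
 */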
static DEFINE_PER_CPU(unsigned long, clean_list_grace);
#define CLEAN_LIST_BUSY_BIT 0

/*
 * This is stored as mr->r_trans_private.
 */
struct rds_ib_mr {
        struct rds_ib_device    *device;
        struct rds_ib_mr_pool   *pool;
        struct ib_fmr           *fmr;

        struct xlist_head       xlist;

        /* unmap_list is for freeing */
        struct list_head        unmap_list;
        unsigned int            remap_count;

        struct scatterlist      *sg;
        unsigned int            sg_len;
        u64                     *dma;
        int                     sg_dma_len;
};

/*
 * Our own little FMR pool
 */
struct rds_ib_mr_pool {
        struct mutex            flush_lock;             /* serialize fmr invalidate */
        struct delayed_work     flush_worker;           /* flush worker */

        atomic_t                item_count;             /* total # of MRs */
        atomic_t                dirty_count;            /* # of dirty MRs */

        struct xlist_head       drop_list;              /* MRs that have reached their max_maps limit */
        struct xlist_head       free_list;              /* unused MRs */
        struct xlist_head       clean_list;             /* global unused & unmapped MRs */
        wait_queue_head_t       flush_wait;

        atomic_t                free_pinned;            /* memory pinned by free MRs */
        unsigned long           max_items;
        unsigned long           max_items_soft;
        unsigned long           max_free_pinned;
        struct ib_fmr_attr      fmr_attr;
};

static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool, int free_all, struct rds_ib_mr **);
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr);
static void rds_ib_mr_pool_flush_worker(struct work_struct *work);

static struct rds_ib_device *rds_ib_get_device(__be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_ipaddr *i_ipaddr;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                rcu_read_lock();
                list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                        if (i_ipaddr->ipaddr == ipaddr) {
                                atomic_inc(&rds_ibdev->refcount);
                                rcu_read_unlock();
                                return rds_ibdev;
                        }
                }
                rcu_read_unlock();
        }

        return NULL;
}

static int rds_ib_add_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;

        i_ipaddr = kmalloc(sizeof *i_ipaddr, GFP_KERNEL);
        if (!i_ipaddr)
                return -ENOMEM;

        i_ipaddr->ipaddr = ipaddr;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail_rcu(&i_ipaddr->list, &rds_ibdev->ipaddr_list);
        spin_unlock_irq(&rds_ibdev->spinlock);

        return 0;
}

static void rds_ib_remove_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_ipaddr *i_ipaddr;
        struct rds_ib_ipaddr *to_free = NULL;

        spin_lock_irq(&rds_ibdev->spinlock);
        list_for_each_entry_rcu(i_ipaddr, &rds_ibdev->ipaddr_list, list) {
                if (i_ipaddr->ipaddr == ipaddr) {
                        list_del_rcu(&i_ipaddr->list);
                        to_free = i_ipaddr;
                        break;
                }
        }
        spin_unlock_irq(&rds_ibdev->spinlock);

        if (to_free) {
                synchronize_rcu();
                kfree(to_free);
        }
}

int rds_ib_update_ipaddr(struct rds_ib_device *rds_ibdev, __be32 ipaddr)
{
        struct rds_ib_device *rds_ibdev_old;

        rds_ibdev_old = rds_ib_get_device(ipaddr);
        if (rds_ibdev_old) {
                rds_ib_remove_ipaddr(rds_ibdev_old, ipaddr);
                rds_ib_dev_put(rds_ibdev_old);
        }

        return rds_ib_add_ipaddr(rds_ibdev, ipaddr);
}

void rds_ib_add_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* conn was previously on the nodev_conns_list */
        spin_lock_irq(&ib_nodev_conns_lock);
        BUG_ON(list_empty(&ib_nodev_conns));
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);

        spin_lock_irq(&rds_ibdev->spinlock);
        list_add_tail(&ic->ib_node, &rds_ibdev->conn_list);
        spin_unlock_irq(&rds_ibdev->spinlock);
        spin_unlock_irq(&ib_nodev_conns_lock);

        ic->rds_ibdev = rds_ibdev;
        atomic_inc(&rds_ibdev->refcount);
}

void rds_ib_remove_conn(struct rds_ib_device *rds_ibdev, struct rds_connection *conn)
{
        struct rds_ib_connection *ic = conn->c_transport_data;

        /* place conn on nodev_conns_list */
        spin_lock(&ib_nodev_conns_lock);

        spin_lock_irq(&rds_ibdev->spinlock);
        BUG_ON(list_empty(&ic->ib_node));
        list_del(&ic->ib_node);
        spin_unlock_irq(&rds_ibdev->spinlock);

        list_add_tail(&ic->ib_node, &ib_nodev_conns);

        spin_unlock(&ib_nodev_conns_lock);

        ic->rds_ibdev = NULL;
        rds_ib_dev_put(rds_ibdev);
}

void __rds_ib_destroy_conns(struct list_head *list, spinlock_t *list_lock)
{
        struct rds_ib_connection *ic, *_ic;
        LIST_HEAD(tmp_list);

        /* avoid calling conn_destroy with irqs off */
        spin_lock_irq(list_lock);
        list_splice(list, &tmp_list);
        INIT_LIST_HEAD(list);
        spin_unlock_irq(list_lock);

        list_for_each_entry_safe(ic, _ic, &tmp_list, ib_node)
                rds_conn_destroy(ic->conn);
}

struct rds_ib_mr_pool *rds_ib_create_mr_pool(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        INIT_XLIST_HEAD(&pool->free_list);
        INIT_XLIST_HEAD(&pool->drop_list);
        INIT_XLIST_HEAD(&pool->clean_list);
        mutex_init(&pool->flush_lock);
        init_waitqueue_head(&pool->flush_wait);
        INIT_DELAYED_WORK(&pool->flush_worker, rds_ib_mr_pool_flush_worker);

        pool->fmr_attr.max_pages = fmr_message_size;
        pool->fmr_attr.max_maps = rds_ibdev->fmr_max_remaps;
        pool->fmr_attr.page_shift = PAGE_SHIFT;
        pool->max_free_pinned = rds_ibdev->max_fmrs * fmr_message_size / 4;

        /* We never allow more than max_items MRs to be allocated.
         * Once we exceed max_items_soft, we start freeing
         * items more aggressively.
         * Make sure that max_items > max_items_soft > max_items / 2
         */
        pool->max_items_soft = rds_ibdev->max_fmrs * 3 / 4;
        pool->max_items = rds_ibdev->max_fmrs;

        return pool;
}

void rds_ib_get_mr_info(struct rds_ib_device *rds_ibdev, struct rds_info_rdma_connection *iinfo)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        iinfo->rdma_mr_max = pool->max_items;
        iinfo->rdma_mr_size = pool->fmr_attr.max_pages;
}

void rds_ib_destroy_mr_pool(struct rds_ib_mr_pool *pool)
{
        cancel_delayed_work_sync(&pool->flush_worker);
        rds_ib_flush_mr_pool(pool, 1, NULL);
        WARN_ON(atomic_read(&pool->item_count));
        WARN_ON(atomic_read(&pool->free_pinned));
        kfree(pool);
}

static void refill_local(struct rds_ib_mr_pool *pool, struct xlist_head *xl,
                         struct rds_ib_mr **ibmr_ret)
{
        struct xlist_head *ibmr_xl;
        ibmr_xl = xlist_del_head_fast(xl);
        *ibmr_ret = list_entry(ibmr_xl, struct rds_ib_mr, xlist);
}

static inline struct rds_ib_mr *rds_ib_reuse_fmr(struct rds_ib_mr_pool *pool)
{
        struct rds_ib_mr *ibmr = NULL;
        struct xlist_head *ret;
        unsigned long *flag;

        preempt_disable();
        flag = &__get_cpu_var(clean_list_grace);
        set_bit(CLEAN_LIST_BUSY_BIT, flag);
        ret = xlist_del_head(&pool->clean_list);
        if (ret)
                ibmr = list_entry(ret, struct rds_ib_mr, xlist);

        clear_bit(CLEAN_LIST_BUSY_BIT, flag);
        preempt_enable();
        return ibmr;
}

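/*
 * Spin until no CPU is inside the CLEAN_LIST_BUSY_BIT section of
 * rds_ib_reuse_fmr() above, so nothing is concurrently pulling entries off
 * the clean xlist while the flush path re-links them.
 */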
static inline void wait_clean_list_grace(void)
{
        int cpu;
        unsigned long *flag;

        for_each_online_cpu(cpu) {
                flag = &per_cpu(clean_list_grace, cpu);
                while (test_bit(CLEAN_LIST_BUSY_BIT, flag))
                        cpu_relax();
        }
}

static struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev)
{
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;
        struct rds_ib_mr *ibmr = NULL;
        int err = 0, iter = 0;

        while (1) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr)
                        return ibmr;

                /* No clean MRs - now we have the choice of either
                 * allocating a fresh MR up to the limit imposed by the
                 * driver, or flushing any dirty unused MRs.
                 * We try to avoid stalling in the send path if possible,
                 * so we allocate as long as we're allowed to.
                 *
                 * We're fussy with enforcing the FMR limit, though. If the driver
                 * tells us we can't use more than N fmrs, we shouldn't start
                 * arguing with it */
                if (atomic_inc_return(&pool->item_count) <= pool->max_items)
                        break;

                atomic_dec(&pool->item_count);

                if (++iter > 2) {
                        rds_ib_stats_inc(s_ib_rdma_mr_pool_depleted);
                        return ERR_PTR(-EAGAIN);
                }

                /* We do have some empty MRs. Flush them out. */
                rds_ib_stats_inc(s_ib_rdma_mr_pool_wait);
                rds_ib_flush_mr_pool(pool, 0, &ibmr);
                if (ibmr)
                        return ibmr;
        }

        ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL, rdsibdev_to_node(rds_ibdev));
        if (!ibmr) {
                err = -ENOMEM;
                goto out_no_cigar;
        }

        ibmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
                        (IB_ACCESS_LOCAL_WRITE |
                         IB_ACCESS_REMOTE_READ |
                         IB_ACCESS_REMOTE_WRITE |
                         IB_ACCESS_REMOTE_ATOMIC),
                        &pool->fmr_attr);
        if (IS_ERR(ibmr->fmr)) {
                err = PTR_ERR(ibmr->fmr);
                ibmr->fmr = NULL;
                printk(KERN_WARNING "RDS/IB: ib_alloc_fmr failed (err=%d)\n", err);
                goto out_no_cigar;
        }

        rds_ib_stats_inc(s_ib_rdma_mr_alloc);
        return ibmr;

out_no_cigar:
        if (ibmr) {
                if (ibmr->fmr)
                        ib_dealloc_fmr(ibmr->fmr);
                kfree(ibmr);
        }
        atomic_dec(&pool->item_count);
        return ERR_PTR(err);
}

static int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
               struct scatterlist *sg, unsigned int nents)
{
        struct ib_device *dev = rds_ibdev->dev;
        struct scatterlist *scat = sg;
        u64 io_addr = 0;
        u64 *dma_pages;
        u32 len;
        int page_cnt, sg_dma_len;
        int i, j;
        int ret;

        sg_dma_len = ib_dma_map_sg(dev, sg, nents,
                                 DMA_BIDIRECTIONAL);
        if (unlikely(!sg_dma_len)) {
                printk(KERN_WARNING "RDS/IB: dma_map_sg failed!\n");
                return -EBUSY;
        }

        len = 0;
        page_cnt = 0;

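        /*
         * The FMR maps a physically contiguous run of pages, so only the
         * first entry may start off a page boundary and only the last may
         * end off one; everything in between must be exactly page-aligned.
         */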
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                if (dma_addr & ~PAGE_MASK) {
                        if (i > 0)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }
                if ((dma_addr + dma_len) & ~PAGE_MASK) {
                        if (i < sg_dma_len - 1)
                                return -EINVAL;
                        else
                                ++page_cnt;
                }

                len += dma_len;
        }

        page_cnt += len >> PAGE_SHIFT;
        if (page_cnt > fmr_message_size)
                return -EINVAL;

        dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
                                 rdsibdev_to_node(rds_ibdev));
        if (!dma_pages)
                return -ENOMEM;

        page_cnt = 0;
        for (i = 0; i < sg_dma_len; ++i) {
                unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
                u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

                for (j = 0; j < dma_len; j += PAGE_SIZE)
                        dma_pages[page_cnt++] =
                                (dma_addr & PAGE_MASK) + j;
        }

        ret = ib_map_phys_fmr(ibmr->fmr,
                                   dma_pages, page_cnt, io_addr);
        if (ret)
                goto out;

        /* Success - the MR was remapped, so we can safely tear down
         * the old mapping. */
        rds_ib_teardown_mr(ibmr);

        ibmr->sg = scat;
        ibmr->sg_len = nents;
        ibmr->sg_dma_len = sg_dma_len;
        ibmr->remap_count++;

        rds_ib_stats_inc(s_ib_rdma_mr_used);
        ret = 0;

out:
        kfree(dma_pages);

        return ret;
}

void rds_ib_sync_mr(void *trans_private, int direction)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;

        switch (direction) {
        case DMA_FROM_DEVICE:
                ib_dma_sync_sg_for_cpu(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        case DMA_TO_DEVICE:
                ib_dma_sync_sg_for_device(rds_ibdev->dev, ibmr->sg,
                        ibmr->sg_dma_len, DMA_BIDIRECTIONAL);
                break;
        }
}

static void __rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        struct rds_ib_device *rds_ibdev = ibmr->device;

        if (ibmr->sg_dma_len) {
                ib_dma_unmap_sg(rds_ibdev->dev,
                                ibmr->sg, ibmr->sg_len,
                                DMA_BIDIRECTIONAL);
                ibmr->sg_dma_len = 0;
        }

        /* Release the s/g list */
        if (ibmr->sg_len) {
                unsigned int i;

                for (i = 0; i < ibmr->sg_len; ++i) {
                        struct page *page = sg_page(&ibmr->sg[i]);

                        /* FIXME we need a way to tell a r/w MR
                         * from a r/o MR */
                        BUG_ON(irqs_disabled());
                        set_page_dirty(page);
                        put_page(page);
                }
                kfree(ibmr->sg);

                ibmr->sg = NULL;
                ibmr->sg_len = 0;
        }
}

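/*
 * Tear down the MR's current mapping and credit the unpinned pages back
 * against the pool's free_pinned count.
 */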
static void rds_ib_teardown_mr(struct rds_ib_mr *ibmr)
{
        unsigned int pinned = ibmr->sg_len;

        __rds_ib_teardown_mr(ibmr);
        if (pinned) {
                struct rds_ib_device *rds_ibdev = ibmr->device;
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                atomic_sub(pinned, &pool->free_pinned);
        }
}

static inline unsigned int rds_ib_flush_goal(struct rds_ib_mr_pool *pool, int free_all)
{
        unsigned int item_count;

        item_count = atomic_read(&pool->item_count);
        if (free_all)
                return item_count;

        return 0;
}

/*
 * given an xlist of mrs, put them all into the list_head for more processing
 */
static void xlist_append_to_list(struct xlist_head *xlist, struct list_head *list)
{
        struct rds_ib_mr *ibmr;
        struct xlist_head splice;
        struct xlist_head *cur;
        struct xlist_head *next;

        splice.next = NULL;
        xlist_splice(xlist, &splice);
        cur = splice.next;
        while (cur) {
                next = cur->next;
                ibmr = list_entry(cur, struct rds_ib_mr, xlist);
                list_add_tail(&ibmr->unmap_list, list);
                cur = next;
        }
}

/*
 * this takes a list head of mrs and chains them onto a single xlist of
 * mrs that are ready for reuse, returning the tail so the caller can
 * splice the whole chain onto the clean list in one go.
 */
static void list_append_to_xlist(struct rds_ib_mr_pool *pool,
                                struct list_head *list, struct xlist_head *xlist,
                                struct xlist_head **tail_ret)
{
        struct rds_ib_mr *ibmr;
        struct xlist_head *cur_mr = xlist;
        struct xlist_head *tail_mr = NULL;

        list_for_each_entry(ibmr, list, unmap_list) {
                tail_mr = &ibmr->xlist;
                tail_mr->next = NULL;
                cur_mr->next = tail_mr;
                cur_mr = tail_mr;
        }
        *tail_ret = tail_mr;
}

/*
 * Flush our pool of MRs.
 * At a minimum, all currently unused MRs are unmapped.
 * If the number of MRs allocated exceeds the limit, we also try
 * to free as many MRs as needed to get back to this limit.
 */
static int rds_ib_flush_mr_pool(struct rds_ib_mr_pool *pool,
                                int free_all, struct rds_ib_mr **ibmr_ret)
{
        struct rds_ib_mr *ibmr, *next;
        struct xlist_head clean_xlist;
        struct xlist_head *clean_tail;
        LIST_HEAD(unmap_list);
        LIST_HEAD(fmr_list);
        unsigned long unpinned = 0;
        unsigned int nfreed = 0, ncleaned = 0, free_goal;
        int ret = 0;

        rds_ib_stats_inc(s_ib_rdma_mr_pool_flush);

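        /*
         * If the caller needs an MR back, don't simply block on flush_lock
         * behind another flusher: keep retrying rds_ib_reuse_fmr() and sleep
         * on flush_wait until either a clean MR shows up or the other flush
         * finishes and we get the lock ourselves.
         */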
        if (ibmr_ret) {
                DEFINE_WAIT(wait);
                while (!mutex_trylock(&pool->flush_lock)) {
                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }

                        prepare_to_wait(&pool->flush_wait, &wait,
                                        TASK_UNINTERRUPTIBLE);
                        if (xlist_empty(&pool->clean_list))
                                schedule();

                        ibmr = rds_ib_reuse_fmr(pool);
                        if (ibmr) {
                                *ibmr_ret = ibmr;
                                finish_wait(&pool->flush_wait, &wait);
                                goto out_nolock;
                        }
                }
                finish_wait(&pool->flush_wait, &wait);
        } else
                mutex_lock(&pool->flush_lock);

        if (ibmr_ret) {
                ibmr = rds_ib_reuse_fmr(pool);
                if (ibmr) {
                        *ibmr_ret = ibmr;
                        goto out;
                }
        }

        /* Get the list of all MRs to be dropped. Ordering matters -
         * we want to put drop_list ahead of free_list.
         */
        xlist_append_to_list(&pool->drop_list, &unmap_list);
        xlist_append_to_list(&pool->free_list, &unmap_list);
        if (free_all)
                xlist_append_to_list(&pool->clean_list, &unmap_list);

        free_goal = rds_ib_flush_goal(pool, free_all);

        if (list_empty(&unmap_list))
                goto out;

        /* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
        list_for_each_entry(ibmr, &unmap_list, unmap_list)
                list_add(&ibmr->fmr->list, &fmr_list);

        ret = ib_unmap_fmr(&fmr_list);
        if (ret)
                printk(KERN_WARNING "RDS/IB: ib_unmap_fmr failed (err=%d)\n", ret);

        /* Now we can destroy the DMA mapping and unpin any pages */
        list_for_each_entry_safe(ibmr, next, &unmap_list, unmap_list) {
                unpinned += ibmr->sg_len;
                __rds_ib_teardown_mr(ibmr);
                if (nfreed < free_goal || ibmr->remap_count >= pool->fmr_attr.max_maps) {
                        rds_ib_stats_inc(s_ib_rdma_mr_free);
                        list_del(&ibmr->unmap_list);
                        ib_dealloc_fmr(ibmr->fmr);
                        kfree(ibmr);
                        nfreed++;
                }
                ncleaned++;
        }

        if (!list_empty(&unmap_list)) {
                /* we have to make sure that none of the things we're about
                 * to put on the clean list would race with other CPUs trying
                 * to pull items off.  The xlist would explode if we managed to
                 * remove something from the clean list and then add it back again
                 * while another CPU was spinning on that same item in xlist_del_head.
                 *
                 * This is pretty unlikely, but just in case wait for an xlist
                 * grace period here before adding anything back into the clean list.
                 */
                wait_clean_list_grace();

                list_append_to_xlist(pool, &unmap_list, &clean_xlist, &clean_tail);
                if (ibmr_ret)
                        refill_local(pool, &clean_xlist, ibmr_ret);

                /* refill_local may have emptied our list */
                if (!xlist_empty(&clean_xlist))
                        xlist_add(clean_xlist.next, clean_tail, &pool->clean_list);
        }

        atomic_sub(unpinned, &pool->free_pinned);
        atomic_sub(ncleaned, &pool->dirty_count);
        atomic_sub(nfreed, &pool->item_count);

out:
        mutex_unlock(&pool->flush_lock);
        if (waitqueue_active(&pool->flush_wait))
                wake_up(&pool->flush_wait);
out_nolock:
        return ret;
}

static void rds_ib_mr_pool_flush_worker(struct work_struct *work)
{
        struct rds_ib_mr_pool *pool = container_of(work, struct rds_ib_mr_pool, flush_worker.work);

        rds_ib_flush_mr_pool(pool, 0, NULL);
}

void rds_ib_free_mr(void *trans_private, int invalidate)
{
        struct rds_ib_mr *ibmr = trans_private;
        struct rds_ib_device *rds_ibdev = ibmr->device;
        struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

        rdsdebug("RDS/IB: free_mr nents %u\n", ibmr->sg_len);

        /* Return it to the pool's free list */
        if (ibmr->remap_count >= pool->fmr_attr.max_maps)
                xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->drop_list);
        else
                xlist_add(&ibmr->xlist, &ibmr->xlist, &pool->free_list);

        atomic_add(ibmr->sg_len, &pool->free_pinned);
        atomic_inc(&pool->dirty_count);

        /* If we've pinned too many pages, request a flush */
        if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
            atomic_read(&pool->dirty_count) >= pool->max_items / 10)
                queue_delayed_work(rds_wq, &pool->flush_worker, 10);

        if (invalidate) {
                if (likely(!in_interrupt())) {
                        rds_ib_flush_mr_pool(pool, 0, NULL);
                } else {
                        /* We get here if the user created an MR marked
                         * as use_once and invalidate at the same time. */
                        queue_delayed_work(rds_wq, &pool->flush_worker, 10);
                }
        }

        rds_ib_dev_put(rds_ibdev);
}

void rds_ib_flush_mrs(void)
{
        struct rds_ib_device *rds_ibdev;

        list_for_each_entry(rds_ibdev, &rds_ib_devices, list) {
                struct rds_ib_mr_pool *pool = rds_ibdev->mr_pool;

                if (pool)
                        rds_ib_flush_mr_pool(pool, 0, NULL);
        }
}

void *rds_ib_get_mr(struct scatterlist *sg, unsigned long nents,
                    struct rds_sock *rs, u32 *key_ret)
{
        struct rds_ib_device *rds_ibdev;
        struct rds_ib_mr *ibmr = NULL;
        int ret;

        rds_ibdev = rds_ib_get_device(rs->rs_bound_addr);
        if (!rds_ibdev) {
                ret = -ENODEV;
                goto out;
        }

        if (!rds_ibdev->mr_pool) {
                ret = -ENODEV;
                goto out;
        }

        ibmr = rds_ib_alloc_fmr(rds_ibdev);
        if (IS_ERR(ibmr)) {
                /* don't leak the device reference taken above */
                ret = PTR_ERR(ibmr);
                ibmr = NULL;
                goto out;
        }

        ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
        if (ret == 0)
                *key_ret = ibmr->fmr->rkey;
        else
                printk(KERN_WARNING "RDS/IB: map_fmr failed (errno=%d)\n", ret);

        ibmr->device = rds_ibdev;
        rds_ibdev = NULL;

 out:
        if (ret) {
                if (ibmr)
                        rds_ib_free_mr(ibmr, 0);
                ibmr = ERR_PTR(ret);
        }
        if (rds_ibdev)
                rds_ib_dev_put(rds_ibdev);
        return ibmr;
}