/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in-use pages
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>

#include <asm/atomic.h>
#include <asm/agp.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"


#define NUM_PAGES_TO_ALLOC      (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION        16
#define FREE_ALL_PAGES          (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL      1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass to alloc_page.
 * @npages: Number of pages in pool.
 * @name: Pool name shown in the debugfs statistics.
 * @nfrees: Statistics counter of pages freed from this pool.
 * @nrefills: Statistics counter of pool refills.
 */
struct ttm_page_pool {
        spinlock_t lock;
        bool fill_lock;
        struct list_head list;
        int gfp_flags;
        unsigned npages;
        char *name;
        unsigned long nfrees;
        unsigned long nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
        unsigned alloc_size;
        unsigned max_size;
        unsigned small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * The manager is a read-only object for the pool code, so it doesn't need
 * locking.
 *
 * @kobj: sysfs kobject exposing the pool limits (see ttm_pool_attrs).
 * @mm_shrink: shrinker registered with the mm layer so that pool pages can
 * be reclaimed when there is memory pressure.
 * @options: tunable limits for the pools.
 * @pools: All pool objects in use, addressable either as an array or, through
 * the anonymous struct, by name.
 **/
struct ttm_pool_manager {
        struct kobject kobj;
        struct shrinker mm_shrink;
        struct ttm_pool_opts options;

        union {
                struct ttm_page_pool pools[NUM_POOLS];
                struct {
                        struct ttm_page_pool wc_pool;
                        struct ttm_page_pool uc_pool;
                        struct ttm_page_pool wc_pool_dma32;
                        struct ttm_page_pool uc_pool_dma32;
                };
        };
};

static struct attribute ttm_page_pool_max = {
        .name = "pool_max_size",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
        .name = "pool_small_allocation",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
        .name = "pool_allocation_size",
        .mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
        &ttm_page_pool_max,
        &ttm_page_pool_small,
        &ttm_page_pool_alloc_size,
        NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
                struct attribute *attr, const char *buffer, size_t size)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        printk(KERN_ERR TTM_PFX
                               "Setting allocation size to %lu "
                               "is not allowed. Recommended size is "
                               "%lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        printk(KERN_WARNING TTM_PFX
                               "Setting allocation size to "
                               "larger than %lu is not recommended.\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}
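
/*
 * Example (illustrative only, assuming 4 KiB pages): writing "4096" to the
 * pool_max_size attribute is read as 4096 kB, so val / (PAGE_SIZE >> 10) =
 * 4096 / 4 = 1024 pages is stored in options.max_size. ttm_pool_show()
 * below applies the inverse conversion when the limits are read back.
 */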

static ssize_t ttm_pool_show(struct kobject *kobj,
                struct attribute *attr, char *buffer)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
        .show = &ttm_pool_show,
        .store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
        .release = &ttm_pool_kobj_release,
        .sysfs_ops = &ttm_pool_sysfs_ops,
        .default_attrs = ttm_pool_attrs,
};

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                unmap_page_from_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}
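
/*
 * Note on the index computed above: it maps straight onto the pool union in
 * struct ttm_pool_manager, so 0 = wc, 1 = uc, 2 = wc dma32 and 3 = uc dma32.
 * Cached (tt_cached) pages have no pool and are allocated and freed directly.
 */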

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;
        if (set_pages_array_wb(pages, npages))
                printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
                       npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * pages at a time.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; FREE_ALL_PAGES frees the whole pool.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
                printk(KERN_ERR TTM_PFX
                       "Failed to allocate memory for pool free operation.\n");
                return 0;
        }

restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        list_for_each_entry_reverse(p, &pool->list, lru) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        __list_del(p->lru.prev, &pool->list);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because the
                         * code that follows expects the spinlock to be held,
                         * while we have already dropped it here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                __list_del(&p->lru, &pool->list);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        kfree(pages_to_free);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for mm to request pool to reduce number of pages held.
 */
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}
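
/*
 * Reading aid for the loop above: ttm_page_pool_free() returns how many of
 * the requested pages it could not free from that pool, so the remainder is
 * carried over to the next pool in round-robin order. The rotating start
 * pool keeps a single pool from always being drained first.
 */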

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
        manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
        unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        printk(KERN_ERR TTM_PFX
                               "Failed to set %d pages to uc!\n",
                               cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        printk(KERN_ERR TTM_PFX
                               "Failed to set %d pages to wc!\n",
                               cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change their caching state. If any pages
 * have already changed their caching state, put them back in the pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        struct page *p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

        if (!caching_array) {
                printk(KERN_ERR TTM_PFX
                       "Unable to allocate table for new pages.");
                return -ENOMEM;
        }

        for (i = 0, cpages = 0; i < count; ++i) {
                p = alloc_page(gfp_flags);

                if (!p) {
                        printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
                                                caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                list_add(&p->lru, pages);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);

        return r;
}
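
/*
 * Implementation note: caching transitions are batched through caching_array,
 * which holds at most PAGE_SIZE/sizeof(struct page *) entries, so even a very
 * large request needs only one page worth of temporary pointers. Highmem
 * pages are never added to the array because their caching attributes are not
 * changed.
 */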

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
                unsigned long *irq_flags)
{
        struct page *p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If pool doesn't have enough pages for the allocation new pages are
         * allocated from outside of pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If the allocation request is small and there are not enough
         * pages in the pool we fill the pool first */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct list_head new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
                                cstate, alloc_size);
                spin_lock_irqsave(&pool->lock, *irq_flags);

                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        printk(KERN_ERR TTM_PFX
                               "Failed to fill pool (%p).", pool);
                        /* If we have any pages left put them to the pool. */
                        list_for_each_entry(p, &new_pages, lru) {
                                ++cpages;
                        }
                        list_splice(&new_pages, &pool->list);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut count number of pages from the pool and put them on the return list.
 *
 * @return count of pages still to allocate to fill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                struct list_head *pages, int ttm_flags,
                enum ttm_caching_state cstate, unsigned count)
{
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;

        spin_lock_irqsave(&pool->lock, irq_flags);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                list_splice_init(&pool->list, pages);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        /* find the last page to include for the requested number of pages.
         * Walk from whichever end of the pool list is closer to reduce the
         * search space. */
        if (count <= pool->npages/2) {
                i = 0;
                list_for_each(p, &pool->list) {
                        if (++i == count)
                                break;
                }
        } else {
                i = pool->npages + 1;
                list_for_each_prev(p, &pool->list) {
                        if (--i == count)
                                break;
                }
        }
        /* Cut count number of pages from pool */
        list_cut_position(pages, &pool->list, p);
        pool->npages -= count;
        count = 0;
out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return count;
}
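
/*
 * Note on the search above: when the request is at most half of the pool the
 * list is walked forward from the head, otherwise it is walked backwards from
 * the tail, so roughly pool->npages/2 entries at most are visited before the
 * cut point for list_cut_position() is found.
 */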

/*
 * On success the pages list will hold count number of correctly
 * cached pages.
 */
int ttm_get_pages(struct list_head *pages, int flags,
                enum ttm_caching_state cstate, unsigned count)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
        int gfp_flags = GFP_USER;
        int r;

        /* set zero flag for page allocation if required */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* No pool for cached pages */
        if (pool == NULL) {
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= GFP_HIGHUSER;

                for (r = 0; r < count; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
                                printk(KERN_ERR TTM_PFX
                                       "Unable to allocate page.");
                                return -ENOMEM;
                        }

                        list_add(&p->lru, pages);
                }
                return 0;
        }

        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
        count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, pages, lru) {
                        clear_page(page_address(p));
                }
        }

        /* If the pool didn't have enough pages allocate new ones. */
        if (count > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
                if (r) {
                        /* If there are any pages in the list put them back to
                         * the pool. */
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
                        ttm_put_pages(pages, 0, flags, cstate);
                        return r;
                }
        }

        return 0;
}

/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
                enum ttm_caching_state cstate)
{
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p, *tmp;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */

                list_for_each_entry_safe(p, tmp, pages, lru) {
                        __free_page(p);
                }
                /* Make the pages list empty */
                INIT_LIST_HEAD(pages);
                return;
        }
        if (page_count == 0) {
                list_for_each_entry_safe(p, tmp, pages, lru) {
                        ++page_count;
                }
        }

        spin_lock_irqsave(&pool->lock, irq_flags);
        list_splice_init(pages, &pool->list);
        pool->npages += page_count;
        /* Check that we don't go over the pool limit */
        page_count = 0;
        if (pool->npages > _manager->options.max_size) {
                page_count = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (page_count < NUM_PAGES_TO_ALLOC)
                        page_count = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (page_count)
                ttm_page_pool_free(pool, page_count);
}
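
/*
 * Illustrative pairing only (not part of this file's call graph): a ttm
 * backend that needs write-combined pages would typically call the two
 * helpers above with matching flags and caching state, e.g.
 *
 *      LIST_HEAD(plist);
 *      int ret = ttm_get_pages(&plist, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc, 16);
 *      if (!ret)
 *              ttm_put_pages(&plist, 16, TTM_PAGE_FLAG_ZERO_ALLOC, tt_wc);
 *
 * Passing page_count as 0 to ttm_put_pages() makes it count the list itself.
 */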

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
{
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
        int ret;

        WARN_ON(_manager);

        printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
        if (!_manager)
                return -ENOMEM;

        ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

        ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "wc dma");

        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
                                   &glob->kobj, "pool");
        if (unlikely(ret != 0)) {
                kobject_put(&_manager->kobj);
                _manager = NULL;
                return ret;
        }

        ttm_pool_mm_shrink_init(_manager);

        return 0;
}
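
/*
 * Usage sketch (assumed caller, not shown in this file): the ttm memory
 * global code is expected to call ttm_page_alloc_init() once with its
 * ttm_mem_global object and a page limit of its choosing, and to pair it
 * with ttm_page_alloc_fini() on teardown:
 *
 *      ret = ttm_page_alloc_init(glob, max_pages);
 *      ...
 *      ttm_page_alloc_fini();
 */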

void ttm_page_alloc_fini(void)
{
        int i;

        printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        kobject_put(&_manager->kobj);
        _manager = NULL;
}

int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                   h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                           p->name, p->nrefills,
                           p->nfrees, p->npages);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);
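
/*
 * With the format strings above, the debugfs output is a small table of the
 * form (numbers purely illustrative):
 *
 *        pool      refills   pages freed     size
 *          wc            2           512      128
 *          uc            0             0        0
 *      wc dma            1           256       64
 *      uc dma            0             0        0
 */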