/*
 * Copyright (c) Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie <airlied@redhat.com>
 *          Jerome Glisse <jglisse@redhat.com>
 *          Pauli Nieminen <suokkos@gmail.com>
 */

/* simple list based uncached page pool
 * - Pool collects recently freed pages for reuse
 * - Use page->lru to keep a free list
 * - doesn't track currently in use pages
 */
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/highmem.h>
#include <linux/mm_types.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/seq_file.h> /* for seq_printf */
#include <linux/slab.h>

#include <asm/atomic.h>

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_page_alloc.h"

#ifdef TTM_HAS_AGP
#include <asm/agp.h>
#endif
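/*
 * NUM_PAGES_TO_ALLOC is sized so that the scratch array of struct page
 * pointers used for batched allocation and freeing fits in a single page.
 */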
#define NUM_PAGES_TO_ALLOC      (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION        16
#define FREE_ALL_PAGES          (~0U)
/* times are in msecs */
#define PAGE_FREE_INTERVAL      1000

/**
 * struct ttm_page_pool - Pool to reuse recently allocated uc/wc pages.
 *
 * @lock: Protects the shared pool from concurrent access. Must be used with
 * irqsave/irqrestore variants because the pool allocator may be called from
 * delayed work.
 * @fill_lock: Prevent concurrent calls to fill.
 * @list: Pool of free uc/wc pages for fast reuse.
 * @gfp_flags: Flags to pass for alloc_page.
 * @npages: Number of pages in pool.
 */
struct ttm_page_pool {
        spinlock_t              lock;
        bool                    fill_lock;
        struct list_head        list;
        int                     gfp_flags;
        unsigned                npages;
        char                    *name;
        unsigned long           nfrees;
        unsigned long           nrefills;
};

/**
 * Limits for the pool. They are handled without locks because the only place
 * where they may change is the sysfs store. They won't have an immediate
 * effect anyway, so forcing serialization to access them is pointless.
 */

struct ttm_pool_opts {
        unsigned        alloc_size;
        unsigned        max_size;
        unsigned        small;
};

#define NUM_POOLS 4

/**
 * struct ttm_pool_manager - Holds memory pools for fast allocation
 *
 * Manager is a read only object for pool code, so it doesn't need locking.
 *
 * @free_interval: minimum number of jiffies between freeing pages from pool.
 * @page_alloc_inited: reference counting for pool allocation.
 * @work: Work that is used to shrink the pool. Work is only run when there
 * are some pages to free.
 * @small_allocation: Limit, in number of pages, below which an allocation is
 * considered small.
 *
 * @pools: All pool objects in use.
 **/
struct ttm_pool_manager {
        struct kobject          kobj;
        struct shrinker         mm_shrink;
        struct ttm_pool_opts    options;

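        /*
         * The four pools can be addressed either by name or by index; the
         * index encoding matches ttm_get_pool(): bit 0 selects uc over wc
         * and bit 1 selects the DMA32 variants.
         */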
        union {
                struct ttm_page_pool    pools[NUM_POOLS];
                struct {
                        struct ttm_page_pool    wc_pool;
                        struct ttm_page_pool    uc_pool;
                        struct ttm_page_pool    wc_pool_dma32;
                        struct ttm_page_pool    uc_pool_dma32;
                } ;
        };
};

static struct attribute ttm_page_pool_max = {
        .name = "pool_max_size",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_small = {
        .name = "pool_small_allocation",
        .mode = S_IRUGO | S_IWUSR
};
static struct attribute ttm_page_pool_alloc_size = {
        .name = "pool_allocation_size",
        .mode = S_IRUGO | S_IWUSR
};

static struct attribute *ttm_pool_attrs[] = {
        &ttm_page_pool_max,
        &ttm_page_pool_small,
        &ttm_page_pool_alloc_size,
        NULL
};

static void ttm_pool_kobj_release(struct kobject *kobj)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        kfree(m);
}

static ssize_t ttm_pool_store(struct kobject *kobj,
                struct attribute *attr, const char *buffer, size_t size)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        int chars;
        unsigned val;
        chars = sscanf(buffer, "%u", &val);
        if (chars == 0)
                return size;

        /* Convert kb to number of pages */
        val = val / (PAGE_SIZE >> 10);

        if (attr == &ttm_page_pool_max)
                m->options.max_size = val;
        else if (attr == &ttm_page_pool_small)
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
                        printk(KERN_ERR TTM_PFX
                               "Setting allocation size to %lu "
                               "is not allowed. Recommended size is "
                               "%lu\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
                        printk(KERN_WARNING TTM_PFX
                               "Setting allocation size to "
                               "larger than %lu is not recommended.\n",
                               NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }

        return size;
}

static ssize_t ttm_pool_show(struct kobject *kobj,
                struct attribute *attr, char *buffer)
{
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
        unsigned val = 0;

        if (attr == &ttm_page_pool_max)
                val = m->options.max_size;
        else if (attr == &ttm_page_pool_small)
                val = m->options.small;
        else if (attr == &ttm_page_pool_alloc_size)
                val = m->options.alloc_size;

        val = val * (PAGE_SIZE >> 10);

        return snprintf(buffer, PAGE_SIZE, "%u\n", val);
}

static const struct sysfs_ops ttm_pool_sysfs_ops = {
        .show = &ttm_pool_show,
        .store = &ttm_pool_store,
};

static struct kobj_type ttm_pool_kobj_type = {
        .release = &ttm_pool_kobj_release,
        .sysfs_ops = &ttm_pool_sysfs_ops,
        .default_attrs = ttm_pool_attrs,
};
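
/*
 * These attributes are registered as a "pool" kobject under the
 * ttm_mem_global kobject in ttm_page_alloc_init() below, i.e. they show up
 * as .../pool/pool_max_size, pool_small_allocation and pool_allocation_size.
 * Values are read and written in KiB and converted to pages internally.
 */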

static struct ttm_pool_manager *_manager;

#ifndef CONFIG_X86
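/*
 * Without the x86 set_pages_array_*() helpers, fall back to the AGP
 * map/unmap helpers to change page caching attributes when TTM_HAS_AGP is
 * defined; otherwise these are no-ops.
 */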
static int set_pages_array_wb(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                unmap_page_from_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_wc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}

static int set_pages_array_uc(struct page **pages, int addrinarray)
{
#ifdef TTM_HAS_AGP
        int i;

        for (i = 0; i < addrinarray; i++)
                map_page_into_agp(pages[i]);
#endif
        return 0;
}
#endif

/**
 * Select the right pool for the requested caching state and ttm flags.
 */
static struct ttm_page_pool *ttm_get_pool(int flags,
                enum ttm_caching_state cstate)
{
        int pool_index;

        if (cstate == tt_cached)
                return NULL;

        if (cstate == tt_wc)
                pool_index = 0x0;
        else
                pool_index = 0x1;

        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;

        return &_manager->pools[pool_index];
}

/* set memory back to wb and free the pages. */
static void ttm_pages_put(struct page *pages[], unsigned npages)
{
        unsigned i;
        if (set_pages_array_wb(pages, npages))
                printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
                                npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
}

static void ttm_pool_update_free_locked(struct ttm_page_pool *pool,
                unsigned freed_pages)
{
        pool->npages -= freed_pages;
        pool->nfrees += freed_pages;
}

/**
 * Free pages from pool.
 *
 * To prevent hogging the ttm_swap process we only free NUM_PAGES_TO_ALLOC
 * number of pages in one go.
 *
 * @pool: to free the pages from
 * @nr_free: number of pages to free; if set to FREE_ALL_PAGES, all pages in
 * the pool are freed.
 **/
static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
{
        unsigned long irq_flags;
        struct page *p;
        struct page **pages_to_free;
        unsigned freed_pages = 0,
                 npages_to_free = nr_free;

        if (NUM_PAGES_TO_ALLOC < nr_free)
                npages_to_free = NUM_PAGES_TO_ALLOC;

        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
                printk(KERN_ERR TTM_PFX
                       "Failed to allocate memory for pool free operation.\n");
                return 0;
        }

restart:
        spin_lock_irqsave(&pool->lock, irq_flags);

        list_for_each_entry_reverse(p, &pool->list, lru) {
                if (freed_pages >= npages_to_free)
                        break;

                pages_to_free[freed_pages++] = p;
                /* We can only remove NUM_PAGES_TO_ALLOC at a time. */
                if (freed_pages >= NUM_PAGES_TO_ALLOC) {
                        /* remove range of pages from the pool */
                        __list_del(p->lru.prev, &pool->list);

                        ttm_pool_update_free_locked(pool, freed_pages);
                        /**
                         * Because changing page caching is costly
                         * we unlock the pool to prevent stalling.
                         */
                        spin_unlock_irqrestore(&pool->lock, irq_flags);

                        ttm_pages_put(pages_to_free, freed_pages);
                        if (likely(nr_free != FREE_ALL_PAGES))
                                nr_free -= freed_pages;

                        if (NUM_PAGES_TO_ALLOC >= nr_free)
                                npages_to_free = nr_free;
                        else
                                npages_to_free = NUM_PAGES_TO_ALLOC;

                        freed_pages = 0;

                        /* free all so restart the processing */
                        if (nr_free)
                                goto restart;

                        /* Not allowed to fall through or break because
                         * following context is inside spinlock while we are
                         * outside here.
                         */
                        goto out;

                }
        }

        /* remove range of pages from the pool */
        if (freed_pages) {
                __list_del(&p->lru, &pool->list);

                ttm_pool_update_free_locked(pool, freed_pages);
                nr_free -= freed_pages;
        }

        spin_unlock_irqrestore(&pool->lock, irq_flags);

        if (freed_pages)
                ttm_pages_put(pages_to_free, freed_pages);
out:
        kfree(pages_to_free);
        return nr_free;
}

/* Get a good estimate of how many pages are free in the pools */
static int ttm_pool_get_num_unused_pages(void)
{
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
                total += _manager->pools[i].npages;

        return total;
}

/**
 * Callback for mm to request the pool to reduce the number of pages held.
 */
static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
{
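        /*
         * With this shrinker interface the callback is also invoked with
         * shrink_pages == 0 purely to query how many pages could be freed,
         * hence the early break below and the unconditional return of the
         * current estimate.
         */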
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
        unsigned pool_offset = atomic_add_return(1, &start_pool);
        struct ttm_page_pool *pool;

        pool_offset = pool_offset % NUM_POOLS;
        /* select start pool in round robin fashion */
        for (i = 0; i < NUM_POOLS; ++i) {
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
                pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
        return ttm_pool_get_num_unused_pages();
}

static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
        manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
        manager->mm_shrink.seeks = 1;
        register_shrinker(&manager->mm_shrink);
}

static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
{
        unregister_shrinker(&manager->mm_shrink);
}

static int ttm_set_pages_caching(struct page **pages,
                enum ttm_caching_state cstate, unsigned cpages)
{
        int r = 0;
        /* Set page caching */
        switch (cstate) {
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
                        printk(KERN_ERR TTM_PFX
                               "Failed to set %d pages to uc!\n",
                               cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
                        printk(KERN_ERR TTM_PFX
                               "Failed to set %d pages to wc!\n",
                               cpages);
                break;
        default:
                break;
        }
        return r;
}

/**
 * Free the pages that failed to change the caching state. If there are any
 * pages that have already changed their caching state, put them back in the
 * pool.
 */
static void ttm_handle_caching_state_failure(struct list_head *pages,
                int ttm_flags, enum ttm_caching_state cstate,
                struct page **failed_pages, unsigned cpages)
{
        unsigned i;
        /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
        }
}

/**
 * Allocate new pages with correct caching.
 *
 * This function is reentrant if the caller updates count depending on the
 * number of pages returned in the pages array.
 */
static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count)
{
        struct page **caching_array;
        struct page *p;
        int r = 0;
        unsigned i, cpages;
        unsigned max_cpages = min(count,
                        (unsigned)(PAGE_SIZE/sizeof(struct page *)));

        /* allocate array for page caching change */
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);

        if (!caching_array) {
                printk(KERN_ERR TTM_PFX
                       "Unable to allocate table for new pages.");
                return -ENOMEM;
        }

        for (i = 0, cpages = 0; i < count; ++i) {
                p = alloc_page(gfp_flags);

                if (!p) {
                        printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);

                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
                                r = ttm_set_pages_caching(caching_array,
                                                          cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                        ttm_flags, cstate,
                                                        caching_array, cpages);
                        }
                        r = -ENOMEM;
                        goto out;
                }

#ifdef CONFIG_HIGHMEM
                /* gfp flags of highmem page should never be dma32 so we
                 * should be fine in such a case
                 */
                if (!PageHighMem(p))
#endif
                {
                        caching_array[cpages++] = p;
                        if (cpages == max_cpages) {

                                r = ttm_set_pages_caching(caching_array,
                                                cstate, cpages);
                                if (r) {
                                        ttm_handle_caching_state_failure(pages,
                                                        ttm_flags, cstate,
                                                        caching_array, cpages);
                                        goto out;
                                }
                                cpages = 0;
                        }
                }

                list_add(&p->lru, pages);
        }

        if (cpages) {
                r = ttm_set_pages_caching(caching_array, cstate, cpages);
                if (r)
                        ttm_handle_caching_state_failure(pages,
                                        ttm_flags, cstate,
                                        caching_array, cpages);
        }
out:
        kfree(caching_array);

        return r;
}

/**
 * Fill the given pool if there aren't enough pages and the requested number
 * of pages is small.
 */
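/* Note: called with pool->lock held (irqs saved); the lock is dropped and
 * re-taken around the actual page allocation below. */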
static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                int ttm_flags, enum ttm_caching_state cstate, unsigned count,
                unsigned long *irq_flags)
{
        struct page *p;
        int r;
        unsigned cpages = 0;
        /**
         * Only allow one pool fill operation at a time.
         * If pool doesn't have enough pages for the allocation new pages are
         * allocated from outside of pool.
         */
        if (pool->fill_lock)
                return;

        pool->fill_lock = true;

        /* If allocation request is small and there are not enough
         * pages in pool we fill the pool first */
        if (count < _manager->options.small
                && count > pool->npages) {
                struct list_head new_pages;
                unsigned alloc_size = _manager->options.alloc_size;

                /**
                 * Can't change page caching if in irqsave context. We have to
                 * drop the pool->lock.
                 */
                spin_unlock_irqrestore(&pool->lock, *irq_flags);

                INIT_LIST_HEAD(&new_pages);
                r = ttm_alloc_new_pages(&new_pages, pool->gfp_flags, ttm_flags,
                                cstate, alloc_size);
                spin_lock_irqsave(&pool->lock, *irq_flags);

                if (!r) {
                        list_splice(&new_pages, &pool->list);
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
                        printk(KERN_ERR TTM_PFX
                               "Failed to fill pool (%p).", pool);
                        /* If we have any pages left put them to the pool. */
                        list_for_each_entry(p, &new_pages, lru) {
                                ++cpages;
                        }
                        list_splice(&new_pages, &pool->list);
                        pool->npages += cpages;
                }

        }
        pool->fill_lock = false;
}

/**
 * Cut count number of pages from the pool and put them into the return list.
 *
 * @return count of pages still to allocate to fill the request.
 */
static unsigned ttm_page_pool_get_pages(struct ttm_page_pool *pool,
                struct list_head *pages, int ttm_flags,
                enum ttm_caching_state cstate, unsigned count)
{
        unsigned long irq_flags;
        struct list_head *p;
        unsigned i;

        spin_lock_irqsave(&pool->lock, irq_flags);
        ttm_page_pool_fill_locked(pool, ttm_flags, cstate, count, &irq_flags);

        if (count >= pool->npages) {
                /* take all pages from the pool */
                list_splice_init(&pool->list, pages);
                count -= pool->npages;
                pool->npages = 0;
                goto out;
        }
        /* Find the last page to include for the requested number of pages.
         * Walk from whichever end of the pool list is closer to halve the
         * search space. */
        if (count <= pool->npages/2) {
                i = 0;
                list_for_each(p, &pool->list) {
                        if (++i == count)
                                break;
                }
        } else {
                i = pool->npages + 1;
                list_for_each_prev(p, &pool->list) {
                        if (--i == count)
                                break;
                }
        }
        /* Cut count number of pages from pool */
        list_cut_position(pages, &pool->list, p);
        pool->npages -= count;
        count = 0;
out:
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        return count;
}

/*
 * On success pages list will hold count number of correctly
 * cached pages.
 */
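/*
 * @pages:  empty list head that receives the allocated pages
 * @flags:  TTM page flags (e.g. TTM_PAGE_FLAG_DMA32, TTM_PAGE_FLAG_ZERO_ALLOC)
 * @cstate: requested caching state for the pages
 * @count:  number of pages to allocate
 */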
int ttm_get_pages(struct list_head *pages, int flags,
                  enum ttm_caching_state cstate, unsigned count)
{
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
        int gfp_flags = GFP_USER;
        int r;

        /* set zero flag for page allocation if required */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC)
                gfp_flags |= __GFP_ZERO;

        /* No pool for cached pages */
        if (pool == NULL) {
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
                        gfp_flags |= GFP_HIGHUSER;

                for (r = 0; r < count; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {

                                printk(KERN_ERR TTM_PFX
                                       "Unable to allocate page.");
                                return -ENOMEM;
                        }

                        list_add(&p->lru, pages);
                }
                return 0;
        }


        /* combine zero flag to pool flags */
        gfp_flags |= pool->gfp_flags;

        /* First we take pages from the pool */
        count = ttm_page_pool_get_pages(pool, pages, flags, cstate, count);

        /* clear the pages coming from the pool if requested */
        if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) {
                list_for_each_entry(p, pages, lru) {
                        clear_page(page_address(p));
                }
        }

        /* If pool didn't have enough pages allocate new ones. */
        if (count > 0) {
                /* ttm_alloc_new_pages doesn't reference pool so we can run
                 * multiple requests in parallel.
                 **/
                r = ttm_alloc_new_pages(pages, gfp_flags, flags, cstate, count);
                if (r) {
                        /* If there are any pages in the list put them back to
                         * the pool. */
                        printk(KERN_ERR TTM_PFX
                               "Failed to allocate extra pages "
                               "for large request.");
                        ttm_put_pages(pages, 0, flags, cstate);
                        return r;
                }
        }


        return 0;
}

/* Put all pages in the pages list into the correct pool to await reuse */
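/* A page_count of 0 means the caller did not count the pages and the list
 * will be walked here to count them. */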
void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
                   enum ttm_caching_state cstate)
{
        unsigned long irq_flags;
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p, *tmp;

        if (pool == NULL) {
                /* No pool for this memory type so free the pages */

                list_for_each_entry_safe(p, tmp, pages, lru) {
                        __free_page(p);
                }
                /* Make the pages list empty */
                INIT_LIST_HEAD(pages);
                return;
        }
        if (page_count == 0) {
                list_for_each_entry_safe(p, tmp, pages, lru) {
                        ++page_count;
                }
        }

        spin_lock_irqsave(&pool->lock, irq_flags);
        list_splice_init(pages, &pool->list);
        pool->npages += page_count;
        /* Check that we don't go over the pool limit */
        page_count = 0;
        if (pool->npages > _manager->options.max_size) {
                page_count = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (page_count < NUM_PAGES_TO_ALLOC)
                        page_count = NUM_PAGES_TO_ALLOC;
        }
        spin_unlock_irqrestore(&pool->lock, irq_flags);
        if (page_count)
                ttm_page_pool_free(pool, page_count);
}

static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
                char *name)
{
        spin_lock_init(&pool->lock);
        pool->fill_lock = false;
        INIT_LIST_HEAD(&pool->list);
        pool->npages = pool->nfrees = 0;
        pool->gfp_flags = flags;
        pool->name = name;
}

int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
{
        int ret;

        WARN_ON(_manager);

        printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");

        _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
        if (!_manager)
                return -ENOMEM;

        ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");

        ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");

        ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "wc dma");

        ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
                                  GFP_USER | GFP_DMA32, "uc dma");

        _manager->options.max_size = max_pages;
        _manager->options.small = SMALL_ALLOCATION;
        _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;

        ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
                                   &glob->kobj, "pool");
        if (unlikely(ret != 0)) {
                kobject_put(&_manager->kobj);
                _manager = NULL;
                return ret;
        }

        ttm_pool_mm_shrink_init(_manager);

        return 0;
}

void ttm_page_alloc_fini(void)
{
        int i;

        printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
        ttm_pool_mm_shrink_fini(_manager);

        for (i = 0; i < NUM_POOLS; ++i)
                ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);

        kobject_put(&_manager->kobj);
        _manager = NULL;
}

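/*
 * Example of the debugfs output produced below (values are illustrative):
 *
 *   pool      refills   pages freed     size
 *     wc           10          1024      256
 *     uc            4           512      128
 * wc dma            0             0        0
 * uc dma            0             0        0
 */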
int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
{
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
        if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                   h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
                p = &_manager->pools[i];

                seq_printf(m, "%6s %12ld %13ld %8d\n",
                           p->name, p->nrefills,
                           p->nfrees, p->npages);
        }
        return 0;
}
EXPORT_SYMBOL(ttm_page_alloc_debugfs);