Merge branch 'drm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied...
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 0d9a42c2394f79983344cd8bd21d99204a288109..ca904799f018a6e3ae23c80933bd1e007aec8f7f 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
 #include <linux/slab.h>
 
 #include <asm/atomic.h>
-#include <asm/agp.h>
 
 #include "ttm/ttm_bo_driver.h"
 #include "ttm/ttm_page_alloc.h"
 
+#ifdef TTM_HAS_AGP
+#include <asm/agp.h>
+#endif
 
 #define NUM_PAGES_TO_ALLOC             (PAGE_SIZE/sizeof(struct page *))
 #define SMALL_ALLOCATION               16
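
Note: guarding the <asm/agp.h> include with TTM_HAS_AGP keeps the file
building on architectures that lack AGP support. The macro itself is not
defined in this diff; presumably it comes from the ttm_page_alloc.h header
included above, along the lines of this sketch (the exact location is an
assumption):

    #ifdef CONFIG_AGP
    #define TTM_HAS_AGP
    #endif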
@@ -77,7 +79,7 @@ struct ttm_page_pool {
 /**
  * Limits for the pool. They are handled without locks because only place where
  * they may change is in sysfs store. They won't have immediate effect anyway
- * so forcing serialiazation to access them is pointless.
+ * so forcing serialization to access them is pointless.
  */
 
 struct ttm_pool_opts {
@@ -104,7 +106,6 @@ struct ttm_pool_opts {
 struct ttm_pool_manager {
        struct kobject          kobj;
        struct shrinker         mm_shrink;
-       atomic_t                page_alloc_inited;
        struct ttm_pool_opts    options;
 
        union {
@@ -142,7 +143,7 @@ static void ttm_pool_kobj_release(struct kobject *kobj)
 {
        struct ttm_pool_manager *m =
                container_of(kobj, struct ttm_pool_manager, kobj);
-       (void)m;
+       kfree(m);
 }
 
 static ssize_t ttm_pool_store(struct kobject *kobj,
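
Note: this hunk, together with the conversion of _manager to a pointer
below, adopts the standard kobject lifetime rule: a structure embedding a
kobject must be heap-allocated and freed only from its ->release callback,
once the last kobject_put() has dropped the final reference. A minimal
sketch of the pattern (foo and foo_release are hypothetical names):

    struct foo {
            struct kobject kobj;            /* embedded refcounted object */
            /* ... payload ... */
    };

    static void foo_release(struct kobject *kobj)
    {
            struct foo *f = container_of(kobj, struct foo, kobj);
            kfree(f);                       /* safe: no references remain */
    }

The old no-op release worked only because _manager was static; kfree() on a
static object would be a bug, hence the switch to kzalloc() in
ttm_page_alloc_init() below.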
@@ -165,16 +166,18 @@ static ssize_t ttm_pool_store(struct kobject *kobj,
                m->options.small = val;
        else if (attr == &ttm_page_pool_alloc_size) {
                if (val > NUM_PAGES_TO_ALLOC*8) {
-                       printk(KERN_ERR "[ttm] Setting allocation size to %lu "
-                                       "is not allowed. Recomended size is "
-                                       "%lu\n",
-                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
-                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       printk(KERN_ERR TTM_PFX
+                              "Setting allocation size to %lu "
+                              "is not allowed. Recommended size is "
+                              "%lu\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 7),
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                        return size;
                } else if (val > NUM_PAGES_TO_ALLOC) {
-                       printk(KERN_WARNING "[ttm] Setting allocation size to "
-                                       "larger than %lu is not recomended.\n",
-                                       NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
+                       printk(KERN_WARNING TTM_PFX
+                              "Setting allocation size to "
+                              "larger than %lu is not recommended.\n",
+                              NUM_PAGES_TO_ALLOC*(PAGE_SIZE >> 10));
                }
                m->options.alloc_size = val;
        }
@@ -212,9 +215,7 @@ static struct kobj_type ttm_pool_kobj_type = {
        .default_attrs = ttm_pool_attrs,
 };
 
-static struct ttm_pool_manager _manager = {
-       .page_alloc_inited      = ATOMIC_INIT(0)
-};
+static struct ttm_pool_manager *_manager;
 
 #ifndef CONFIG_X86
 static int set_pages_array_wb(struct page **pages, int addrinarray)
@@ -269,7 +270,7 @@ static struct ttm_page_pool *ttm_get_pool(int flags,
        if (flags & TTM_PAGE_FLAG_DMA32)
                pool_index |= 0x2;
 
-       return &_manager.pools[pool_index];
+       return &_manager->pools[pool_index];
 }
 
 /* set memory back to wb and free the pages. */
@@ -277,7 +278,7 @@ static void ttm_pages_put(struct page *pages[], unsigned npages)
 {
        unsigned i;
        if (set_pages_array_wb(pages, npages))
-               printk(KERN_ERR "[ttm] Failed to set %d pages to wb!\n",
+               printk(KERN_ERR TTM_PFX "Failed to set %d pages to wb!\n",
                                npages);
        for (i = 0; i < npages; ++i)
                __free_page(pages[i]);
@@ -313,7 +314,8 @@ static int ttm_page_pool_free(struct ttm_page_pool *pool, unsigned nr_free)
        pages_to_free = kmalloc(npages_to_free * sizeof(struct page *),
                        GFP_KERNEL);
        if (!pages_to_free) {
-               printk(KERN_ERR "Failed to allocate memory for pool free operation.\n");
+               printk(KERN_ERR TTM_PFX
+                      "Failed to allocate memory for pool free operation.\n");
                return 0;
        }
 
@@ -384,15 +386,15 @@ static int ttm_pool_get_num_unused_pages(void)
        unsigned i;
        int total = 0;
        for (i = 0; i < NUM_POOLS; ++i)
-               total += _manager.pools[i].npages;
+               total += _manager->pools[i].npages;
 
        return total;
 }
 
 /**
- * Calback for mm to request pool to reduce number of page held.
+ * Callback for mm to request pool to reduce the number of pages held.
  */
-static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
+static int ttm_pool_mm_shrink(struct shrinker *shrink, int shrink_pages, gfp_t gfp_mask)
 {
        static atomic_t start_pool = ATOMIC_INIT(0);
        unsigned i;
@@ -405,7 +407,7 @@ static int ttm_pool_mm_shrink(int shrink_pages, gfp_t gfp_mask)
                unsigned nr_free = shrink_pages;
                if (shrink_pages == 0)
                        break;
-               pool = &_manager.pools[(i + pool_offset)%NUM_POOLS];
+               pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
                shrink_pages = ttm_page_pool_free(pool, nr_free);
        }
        /* return estimated number of unused pages in pool */
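
Note: ttm_pool_mm_shrink() gains a struct shrinker * first parameter to
match the updated shrinker callback signature. The registration side is not
part of this diff; judging from the ttm_pool_mm_shrink_init() call in
ttm_page_alloc_init() below, it presumably looks roughly like:

    static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
    {
            manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
            manager->mm_shrink.seeks = 1;
            register_shrinker(&manager->mm_shrink);
    }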
@@ -433,14 +435,16 @@ static int ttm_set_pages_caching(struct page **pages,
        case tt_uncached:
                r = set_pages_array_uc(pages, cpages);
                if (r)
-                       printk(KERN_ERR "[ttm] Failed to set %d pages to uc!\n",
-                                       cpages);
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to set %d pages to uc!\n",
+                              cpages);
                break;
        case tt_wc:
                r = set_pages_array_wc(pages, cpages);
                if (r)
-                       printk(KERN_ERR "[ttm] Failed to set %d pages to wc!\n",
-                                       cpages);
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to set %d pages to wc!\n",
+                              cpages);
                break;
        default:
                break;
@@ -458,7 +462,7 @@ static void ttm_handle_caching_state_failure(struct list_head *pages,
                struct page **failed_pages, unsigned cpages)
 {
        unsigned i;
-       /* Failed pages has to be reed */
+       /* Failed pages have to be freed */
        for (i = 0; i < cpages; ++i) {
                list_del(&failed_pages[i]->lru);
                __free_page(failed_pages[i]);
@@ -485,7 +489,8 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
        caching_array = kmalloc(max_cpages*sizeof(struct page *), GFP_KERNEL);
 
        if (!caching_array) {
-               printk(KERN_ERR "[ttm] unable to allocate table for new pages.");
+               printk(KERN_ERR TTM_PFX
+                      "Unable to allocate table for new pages.");
                return -ENOMEM;
        }
 
@@ -493,12 +498,13 @@ static int ttm_alloc_new_pages(struct list_head *pages, int gfp_flags,
                p = alloc_page(gfp_flags);
 
                if (!p) {
-                       printk(KERN_ERR "[ttm] unable to get page %u\n", i);
+                       printk(KERN_ERR TTM_PFX "Unable to get page %u.\n", i);
 
                        /* store already allocated pages in the pool after
                         * setting the caching state */
                        if (cpages) {
-                               r = ttm_set_pages_caching(caching_array, cstate, cpages);
+                               r = ttm_set_pages_caching(caching_array,
+                                                         cstate, cpages);
                                if (r)
                                        ttm_handle_caching_state_failure(pages,
                                                ttm_flags, cstate,
@@ -569,10 +575,10 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
 
        /* If allocation request is small and there is not enough
         * pages in pool we fill the pool first */
-       if (count < _manager.options.small
+       if (count < _manager->options.small
                && count > pool->npages) {
                struct list_head new_pages;
-               unsigned alloc_size = _manager.options.alloc_size;
+               unsigned alloc_size = _manager->options.alloc_size;
 
                /**
                 * Can't change page caching if in irqsave context. We have to
@@ -590,7 +596,8 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool,
                        ++pool->nrefills;
                        pool->npages += alloc_size;
                } else {
-                       printk(KERN_ERR "[ttm] Failed to fill pool (%p).", pool);
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to fill pool (%p).", pool);
                        /* If we have any pages left put them to the pool. */
                        list_for_each_entry(p, &pool->list, lru) {
                                ++cpages;
@@ -659,7 +666,7 @@ int ttm_get_pages(struct list_head *pages, int flags,
 {
        struct ttm_page_pool *pool = ttm_get_pool(flags, cstate);
        struct page *p = NULL;
-       int gfp_flags = 0;
+       int gfp_flags = GFP_USER;
        int r;
 
        /* set zero flag for page allocation if required */
@@ -671,13 +678,14 @@ int ttm_get_pages(struct list_head *pages, int flags,
                if (flags & TTM_PAGE_FLAG_DMA32)
                        gfp_flags |= GFP_DMA32;
                else
-                       gfp_flags |= __GFP_HIGHMEM;
+                       gfp_flags |= GFP_HIGHUSER;
 
                for (r = 0; r < count; ++r) {
                        p = alloc_page(gfp_flags);
                        if (!p) {
 
-                               printk(KERN_ERR "[ttm] unable to allocate page.");
+                               printk(KERN_ERR TTM_PFX
+                                      "Unable to allocate page.");
                                return -ENOMEM;
                        }
 
@@ -709,8 +717,9 @@ int ttm_get_pages(struct list_head *pages, int flags,
                if (r) {
                        /* If there is any pages in the list put them back to
                         * the pool. */
-                       printk(KERN_ERR "[ttm] Failed to allocate extra pages "
-                                       "for large request.");
+                       printk(KERN_ERR TTM_PFX
+                              "Failed to allocate extra pages "
+                              "for large request.");
                        ttm_put_pages(pages, 0, flags, cstate);
                        return r;
                }
@@ -749,8 +758,8 @@ void ttm_put_pages(struct list_head *pages, unsigned page_count, int flags,
        pool->npages += page_count;
        /* Check that we don't go over the pool limit */
        page_count = 0;
-       if (pool->npages > _manager.options.max_size) {
-               page_count = pool->npages - _manager.options.max_size;
+       if (pool->npages > _manager->options.max_size) {
+               page_count = pool->npages - _manager->options.max_size;
                /* free at least NUM_PAGES_TO_ALLOC number of pages
                 * to reduce calls to set_memory_wb */
                if (page_count < NUM_PAGES_TO_ALLOC)
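
Note: the NUM_PAGES_TO_ALLOC floor exists to batch the expensive
set_pages_array_wb() transitions. Worked example for a typical 64-bit build
with 4 KiB pages:

    /* NUM_PAGES_TO_ALLOC = PAGE_SIZE / sizeof(struct page *)
     *                    = 4096 / 8
     *                    = 512 pages (2 MiB) trimmed per batch      */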
@@ -775,33 +784,36 @@ static void ttm_page_pool_init_locked(struct ttm_page_pool *pool, int flags,
 int ttm_page_alloc_init(struct ttm_mem_global *glob, unsigned max_pages)
 {
        int ret;
-       if (atomic_add_return(1, &_manager.page_alloc_inited) > 1)
-               return 0;
 
-       printk(KERN_INFO "[ttm] Initializing pool allocator.\n");
+       WARN_ON(_manager);
+
+       printk(KERN_INFO TTM_PFX "Initializing pool allocator.\n");
+
+       _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
 
-       ttm_page_pool_init_locked(&_manager.wc_pool, GFP_HIGHUSER, "wc");
+       ttm_page_pool_init_locked(&_manager->wc_pool, GFP_HIGHUSER, "wc");
 
-       ttm_page_pool_init_locked(&_manager.uc_pool, GFP_HIGHUSER, "uc");
+       ttm_page_pool_init_locked(&_manager->uc_pool, GFP_HIGHUSER, "uc");
 
-       ttm_page_pool_init_locked(&_manager.wc_pool_dma32, GFP_USER | GFP_DMA32,
-                       "wc dma");
+       ttm_page_pool_init_locked(&_manager->wc_pool_dma32,
+                                 GFP_USER | GFP_DMA32, "wc dma");
 
-       ttm_page_pool_init_locked(&_manager.uc_pool_dma32, GFP_USER | GFP_DMA32,
-                       "uc dma");
+       ttm_page_pool_init_locked(&_manager->uc_pool_dma32,
+                                 GFP_USER | GFP_DMA32, "uc dma");
 
-       _manager.options.max_size = max_pages;
-       _manager.options.small = SMALL_ALLOCATION;
-       _manager.options.alloc_size = NUM_PAGES_TO_ALLOC;
+       _manager->options.max_size = max_pages;
+       _manager->options.small = SMALL_ALLOCATION;
+       _manager->options.alloc_size = NUM_PAGES_TO_ALLOC;
 
-       kobject_init(&_manager.kobj, &ttm_pool_kobj_type);
-       ret = kobject_add(&_manager.kobj, &glob->kobj, "pool");
+       ret = kobject_init_and_add(&_manager->kobj, &ttm_pool_kobj_type,
+                                  &glob->kobj, "pool");
        if (unlikely(ret != 0)) {
-               kobject_put(&_manager.kobj);
+               kobject_put(&_manager->kobj);
+               _manager = NULL;
                return ret;
        }
 
-       ttm_pool_mm_shrink_init(&_manager);
+       ttm_pool_mm_shrink_init(_manager);
 
        return 0;
 }
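
Note: kobject_init_and_add() correctly replaces the former
kobject_init()/kobject_add() pair, and kobject_put() is still required on
failure so that ttm_pool_kobj_release() runs. One gap worth flagging: the
kzalloc() of _manager above is never checked for failure. A defensive
version (hypothetical, not in this patch) would be:

    _manager = kzalloc(sizeof(*_manager), GFP_KERNEL);
    if (!_manager)
            return -ENOMEM;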
@@ -810,16 +822,14 @@ void ttm_page_alloc_fini()
 {
        int i;
 
-       if (atomic_sub_return(1, &_manager.page_alloc_inited) > 0)
-               return;
-
-       printk(KERN_INFO "[ttm] Finilizing pool allocator.\n");
-       ttm_pool_mm_shrink_fini(&_manager);
+       printk(KERN_INFO TTM_PFX "Finalizing pool allocator.\n");
+       ttm_pool_mm_shrink_fini(_manager);
 
        for (i = 0; i < NUM_POOLS; ++i)
-               ttm_page_pool_free(&_manager.pools[i], FREE_ALL_PAGES);
+               ttm_page_pool_free(&_manager->pools[i], FREE_ALL_PAGES);
 
-       kobject_put(&_manager.kobj);
+       kobject_put(&_manager->kobj);
+       _manager = NULL;
 }
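
Note: the teardown order is deliberate: the shrinker is unregistered before
the pools are drained, so the mm can no longer call back into pools that
are being freed. The helper is not shown in this diff but presumably
reduces to:

    static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager)
    {
            unregister_shrinker(&manager->mm_shrink);
    }

The final kobject_put() drops the last reference, ttm_pool_kobj_release()
kfree()s the manager, and clearing _manager lets a later
ttm_page_alloc_debugfs() report "No pool allocator running.".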
 
 int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
@@ -827,14 +837,14 @@ int ttm_page_alloc_debugfs(struct seq_file *m, void *data)
        struct ttm_page_pool *p;
        unsigned i;
        char *h[] = {"pool", "refills", "pages freed", "size"};
-       if (atomic_read(&_manager.page_alloc_inited) == 0) {
+       if (!_manager) {
                seq_printf(m, "No pool allocator running.\n");
                return 0;
        }
        seq_printf(m, "%6s %12s %13s %8s\n",
                        h[0], h[1], h[2], h[3]);
        for (i = 0; i < NUM_POOLS; ++i) {
-               p = &_manager.pools[i];
+               p = &_manager->pools[i];
 
                seq_printf(m, "%6s %12ld %13ld %8d\n",
                                p->name, p->nrefills,