page allocator: get the pageblock migratetype without disabling interrupts
author      Mel Gorman <mel@csn.ul.ie>
            Tue, 16 Jun 2009 22:32:14 +0000 (15:32 -0700)
committer   Linus Torvalds <torvalds@linux-foundation.org>
            Wed, 17 Jun 2009 02:47:35 +0000 (19:47 -0700)
Local interrupts are disabled when freeing pages to the PCP list.  Part of
that free path checks the migratetype of the pageblock the page is in, but
it does so with interrupts disabled, and interrupts should never be
disabled longer than necessary.  This patch checks the pageblock type with
interrupts enabled, with the impact that a page may be freed to the wrong
list when a pageblock changes type.  As that block is then already
considered mixed from an anti-fragmentation perspective, it is not of
vital importance.
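
For illustration, here is a minimal userspace sketch (not kernel code) of the
reordering this patch makes: the lock-free classification is done before the
critical section is entered, so the section only covers the list manipulation.
The names below (struct item, classify_item, list_lock, free_item) are
illustrative stand-ins, not kernel APIs; the mutex merely models the
interrupts-off window of local_irq_save()/local_irq_restore().

#include <stdio.h>
#include <pthread.h>

struct item {
	int type;                 /* stands in for page_private()/migratetype */
	struct item *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct item *list_head;

/* Stand-in for get_pageblock_migratetype(): a lookup that needs no locking. */
static int classify_item(const struct item *it)
{
	return it->type >= 0 ? it->type : 0;
}

static void free_item(struct item *it)
{
	/* Record the type before taking the lock, mirroring how the patch
	 * records the migratetype before local_irq_save(). */
	it->type = classify_item(it);

	pthread_mutex_lock(&list_lock);    /* models local_irq_save()    */
	it->next = list_head;              /* models list_add()          */
	list_head = it;
	pthread_mutex_unlock(&list_lock);  /* models local_irq_restore() */
}

int main(void)
{
	struct item a = { .type = 1 };

	free_item(&a);
	printf("freed item of type %d\n", list_head->type);
	return 0;
}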

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/page_alloc.c

index d56e377ad0852d06cc8ad1de57f03d4636dd0c29..e60e41474332301f2f67426375a47b0f01d9f65a 100644
@@ -1030,6 +1030,7 @@ static void free_hot_cold_page(struct page *page, int cold)
        kernel_map_pages(page, 1, 0);
 
        pcp = &zone_pcp(zone, get_cpu())->pcp;
+       set_page_private(page, get_pageblock_migratetype(page));
        local_irq_save(flags);
        if (unlikely(clearMlocked))
                free_page_mlock(page);
@@ -1039,7 +1040,6 @@ static void free_hot_cold_page(struct page *page, int cold)
                list_add_tail(&page->lru, &pcp->list);
        else
                list_add(&page->lru, &pcp->list);
-       set_page_private(page, get_pageblock_migratetype(page));
        pcp->count++;
        if (pcp->count >= pcp->high) {
                free_pages_bulk(zone, pcp->batch, &pcp->list, 0);
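
Taken together, the two hunks leave this part of free_hot_cold_page() reading
roughly as follows (reconstructed from the hunks above; the unchanged lines
between them are elided), with the migratetype read moved ahead of
local_irq_save():

	pcp = &zone_pcp(zone, get_cpu())->pcp;
	set_page_private(page, get_pageblock_migratetype(page));
	local_irq_save(flags);
	if (unlikely(clearMlocked))
		free_page_mlock(page);
	/* ... unchanged lines between the two hunks elided ... */
	if (cold)
		list_add_tail(&page->lru, &pcp->list);
	else
		list_add(&page->lru, &pcp->list);
	pcp->count++;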