lmb: rename to memblock

diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 643dcac40fcbc56c8cc3102a74e76a6aae4381eb..d135f93cb0f63b5d6f52350c4252a94b103f6018 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -34,7 +34,7 @@
 #include <linux/bootmem.h>
 #include <linux/pci.h>
 #include <linux/lockdep.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/io.h>
 #include <asm/kdump.h>
 #include <asm/prom.h>
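
The change is mechanical: each lmb_* symbol becomes a memblock_* symbol with unchanged semantics, and the global "struct lmb lmb" becomes "struct memblock memblock". The mappings exercised in this file:

	lmb_phys_mem_size()                ->  memblock_phys_mem_size()
	lmb_alloc(size, align)             ->  memblock_alloc(size, align)
	lmb_alloc_base(size, align, max)   ->  memblock_alloc_base(size, align, max)
	lmb.rmo_size                       ->  memblock.rmo_size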
@@ -158,7 +158,7 @@ static void __init setup_paca(struct paca_struct *new_paca)
  * the CPU that ignores the top 2 bits of the address in real
  * mode so we can access kernel globals normally provided we
  * only toy with things in the RMO region. From here, we do
- * some early parsing of the device-tree to setup out LMB
+ * some early parsing of the device-tree to set up our MEMBLOCK
  * data structures, and allocate & initialize the hash table
  * and segment tables so we can start running with translation
  * enabled.
@@ -404,7 +404,7 @@ void __init setup_system(void)
 
        printk("-----------------------------------------------------\n");
        printk("ppc64_pft_size                = 0x%llx\n", ppc64_pft_size);
-       printk("physicalMemorySize            = 0x%llx\n", lmb_phys_mem_size());
+       printk("physicalMemorySize            = 0x%llx\n", memblock_phys_mem_size());
        if (ppc64_caches.dline_size != 0x80)
                printk("ppc64_caches.dcache_line_size = 0x%x\n",
                       ppc64_caches.dline_size);
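
memblock_phys_mem_size() reports the total bytes of RAM registered with memblock. As a rough sketch of where that total comes from, assuming the u64-based memblock API of this era (the helper name and both region sizes below are illustrative, not taken from this file):

	#include <linux/memblock.h>

	/* Illustrative only: the real RAM layout comes from the device tree. */
	static void __init example_register_ram(void)
	{
		memblock_add(0x00000000ULL, 0x10000000ULL);	/* 256MB at 0     */
		memblock_add(0x20000000ULL, 0x10000000ULL);	/* 256MB at 512MB */

		/* Recompute totals; memblock_phys_mem_size() now returns 0x20000000. */
		memblock_analyze();
	}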
@@ -443,10 +443,10 @@ static void __init irqstack_early_init(void)
         */
        for_each_possible_cpu(i) {
                softirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc_base(THREAD_SIZE,
+                       __va(memblock_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, limit));
                hardirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc_base(THREAD_SIZE,
+                       __va(memblock_alloc_base(THREAD_SIZE,
                                            THREAD_SIZE, limit));
        }
 }
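
memblock_alloc_base() returns a physical address no higher than its third argument and panics if no free range fits, which is why the result above can be fed straight to __va() without an error check. A standalone sketch of the same allocate-below-a-limit pattern, again assuming the u64 signatures of this era (example_alloc_stack and the memset() are illustrative additions):

	#include <linux/memblock.h>
	#include <linux/string.h>
	#include <asm/page.h>		/* __va() */
	#include <asm/thread_info.h>	/* THREAD_SIZE */

	/* Illustrative helper, not part of this file. */
	static struct thread_info * __init example_alloc_stack(u64 limit)
	{
		/* THREAD_SIZE bytes, THREAD_SIZE-aligned, no higher than 'limit'. */
		u64 pa = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);

		/* Early allocations are physical; convert before touching them. */
		memset(__va(pa), 0, THREAD_SIZE);
		return (struct thread_info *)__va(pa);
	}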
@@ -458,11 +458,11 @@ static void __init exc_lvl_early_init(void)
 
        for_each_possible_cpu(i) {
                critirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                dbgirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
                mcheckirq_ctx[i] = (struct thread_info *)
-                       __va(lmb_alloc(THREAD_SIZE, THREAD_SIZE));
+                       __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE));
        }
 }
 #else
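
memblock_alloc() is the unbounded variant: in this era it simply calls memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE), so the block can land anywhere memblock tracks and the same panic-on-failure behaviour applies. A minimal sketch (the helper name is illustrative):

	#include <linux/memblock.h>
	#include <asm/page.h>		/* __va() */
	#include <asm/thread_info.h>	/* struct thread_info, THREAD_SIZE */

	/* Illustrative helper, not part of this file. */
	static struct thread_info * __init example_alloc_anywhere(void)
	{
		/* Stack-aligned allocation with no upper address bound. */
		return (struct thread_info *)__va(memblock_alloc(THREAD_SIZE,
								  THREAD_SIZE));
	}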
@@ -487,11 +487,11 @@ static void __init emergency_stack_init(void)
         * bringup, we need to get at them in real mode. This means they
         * must also be within the RMO region.
         */
-       limit = min(slb0_limit(), lmb.rmo_size);
+       limit = min(slb0_limit(), memblock.rmo_size);
 
        for_each_possible_cpu(i) {
                unsigned long sp;
-               sp  = lmb_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
+               sp  = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);
                sp += THREAD_SIZE;
                paca[i].emergency_sp = __va(sp);
        }
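
Two details are worth spelling out in this final hunk: memblock.rmo_size (formerly lmb.rmo_size) is the size of the RMO region mentioned in the comment above, so the min() keeps the emergency stacks reachable in real mode; and because stacks grow downward, THREAD_SIZE is added so that emergency_sp points at the top of the allocation rather than its base. A compressed sketch of the pattern under the same era assumptions (example_emergency_sp is illustrative):

	#include <linux/kernel.h>	/* min() */
	#include <linux/memblock.h>
	#include <asm/page.h>		/* __va() */
	#include <asm/thread_info.h>	/* THREAD_SIZE */

	/* Illustrative helper, not part of this file. */
	static void * __init example_emergency_sp(u64 stack_limit)
	{
		/* Never allocate above the RMO: real mode must reach this stack. */
		u64 limit = min(stack_limit, memblock.rmo_size);
		u64 sp = memblock_alloc_base(THREAD_SIZE, THREAD_SIZE, limit);

		/* Stacks grow down, so return the top of the block, not its base. */
		return __va(sp + THREAD_SIZE);
	}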