bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'hotplug' into devel
author Russell King <rmk+kernel@arm.linux.org.uk>
Mon, 18 Oct 2010 21:34:47 +0000 (22:34 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Mon, 18 Oct 2010 21:34:47 +0000 (22:34 +0100)
Conflicts:
arch/arm/kernel/head-common.S

arch/arm/kernel/head-common.S
arch/arm/kernel/head.S
arch/arm/kernel/smp.c
arch/arm/kernel/vmlinux.lds.S
arch/arm/mm/proc-v6.S
arch/arm/mm/proc-v7.S

index 58a3e632b6d5bf76e02bb74fbbb4f386f076c9f9,c4effcfba1ebbe2c617c6e29189479d6ce3909b6..bbecaac1e0135132dd7b208735fd18130030ec76
@@@ -265,3 -131,150 +131,150 @@@ __vet_atags
  1:    mov     r2, #0
        mov     pc, lr
  ENDPROC(__vet_atags)
 -      .long   _data                           @ r5
+ /*
+  * The following fragment of code is executed with the MMU on, and uses
+  * absolute addresses; this is not position independent.
+  *
+  *  r0  = cp#15 control register
+  *  r1  = machine ID
+  *  r2  = atags pointer
+  *  r9  = processor ID
+  */
+       __INIT
+ __mmap_switched:
+       adr     r3, __mmap_switched_data
+       ldmia   r3!, {r4, r5, r6, r7}
+       cmp     r4, r5                          @ Copy data segment if needed
+ 1:    cmpne   r5, r6
+       ldrne   fp, [r4], #4
+       strne   fp, [r5], #4
+       bne     1b
+       mov     fp, #0                          @ Clear BSS (and zero fp)
+ 1:    cmp     r6, r7
+       strcc   fp, [r6],#4
+       bcc     1b
+  ARM( ldmia   r3, {r4, r5, r6, r7, sp})
+  THUMB(       ldmia   r3, {r4, r5, r6, r7}    )
+  THUMB(       ldr     sp, [r3, #16]           )
+       str     r9, [r4]                        @ Save processor ID
+       str     r1, [r5]                        @ Save machine type
+       str     r2, [r6]                        @ Save atags pointer
+       bic     r4, r0, #CR_A                   @ Clear 'A' bit
+       stmia   r7, {r0, r4}                    @ Save control register values
+       b       start_kernel
+ ENDPROC(__mmap_switched)
+
+       .align  2
+       .type   __mmap_switched_data, %object
+ __mmap_switched_data:
+       .long   __data_loc                      @ r4
++      .long   _sdata                          @ r5
+       .long   __bss_start                     @ r6
+       .long   _end                            @ r7
+       .long   processor_id                    @ r4
+       .long   __machine_arch_type             @ r5
+       .long   __atags_pointer                 @ r6
+       .long   cr_alignment                    @ r7
+       .long   init_thread_union + THREAD_START_SP @ sp
+       .size   __mmap_switched_data, . - __mmap_switched_data
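In C, the sequence above amounts to: copy .data from its load address to its link address when the two differ (the XIP case), clear the BSS, then record the boot parameters and call start_kernel(). A minimal sketch, assuming only the linker-script symbols listed in __mmap_switched_data (the function itself is illustrative, not kernel code):

    extern unsigned long __data_loc[], _sdata[], __bss_start[], _end[];

    static void mmap_switched_sketch(void)
    {
        unsigned long *src = __data_loc;
        unsigned long *dst = _sdata;

        while (src != dst && dst < __bss_start)
            *dst++ = *src++;            /* copy .data if it was loaded elsewhere */

        for (dst = __bss_start; dst < _end; dst++)
            *dst = 0;                   /* clear BSS */

        /* save processor ID, machine type, atags pointer and control
         * register values, switch to init_thread_union's stack, then
         * call start_kernel() */
    }
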
+ /*
+  * This provides a C-API version of __lookup_machine_type
+  */
+ ENTRY(lookup_machine_type)
+       stmfd   sp!, {r4 - r6, lr}
+       mov     r1, r0
+       bl      __lookup_machine_type
+       mov     r0, r5
+       ldmfd   sp!, {r4 - r6, pc}
+ ENDPROC(lookup_machine_type)
+
+ /*
+  * This provides a C-API version of __lookup_processor_type
+  */
+ ENTRY(lookup_processor_type)
+       stmfd   sp!, {r4 - r6, r9, lr}
+       mov     r9, r0
+       bl      __lookup_processor_type
+       mov     r0, r5
+       ldmfd   sp!, {r4 - r6, r9, pc}
+ ENDPROC(lookup_processor_type)
+
+ /*
+  * Read processor ID register (CP#15, CR0), and look up in the linker-built
+  * supported processor list.  Note that we can't use the absolute addresses
+  * for the __proc_info lists since we aren't running with the MMU on
+  * (and therefore, we are not in the correct address space).  We have to
+  * calculate the offset.
+  *
+  *    r9 = cpuid
+  * Returns:
+  *    r3, r4, r6 corrupted
+  *    r5 = proc_info pointer in physical address space
+  *    r9 = cpuid (preserved)
+  */
+       __CPUINIT
+ __lookup_processor_type:
+       adr     r3, __lookup_processor_type_data
+       ldmia   r3, {r4 - r6}
+       sub     r3, r3, r4                      @ get offset between virt&phys
+       add     r5, r5, r3                      @ convert virt addresses to
+       add     r6, r6, r3                      @ physical address space
+ 1:    ldmia   r5, {r3, r4}                    @ value, mask
+       and     r4, r4, r9                      @ mask wanted bits
+       teq     r3, r4
+       beq     2f
+       add     r5, r5, #PROC_INFO_SZ           @ sizeof(proc_info_list)
+       cmp     r5, r6
+       blo     1b
+       mov     r5, #0                          @ unknown processor
+ 2:    mov     pc, lr
+ ENDPROC(__lookup_processor_type)
+
+ /*
+  * Look in <asm/procinfo.h> for information about the __proc_info structure.
+  */
+       .align  2
+       .type   __lookup_processor_type_data, %object
+ __lookup_processor_type_data:
+       .long   .
+       .long   __proc_info_begin
+       .long   __proc_info_end
+       .size   __lookup_processor_type_data, . - __lookup_processor_type_data
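The `.long .` entry records the table's link-time address; subtracting it from the address the table is actually found at (via adr) yields the phys-virt offset used to rebase __proc_info_begin/__proc_info_end. The match loop itself, rendered as C (a sketch: the structure is cut down to the two fields the loop reads; the real proc_info_list is in <asm/procinfo.h>):

    struct proc_info_sketch {
        unsigned int cpu_val;
        unsigned int cpu_mask;
        /* ... initialisation function, name, etc. */
    };

    static struct proc_info_sketch *
    lookup_sketch(struct proc_info_sketch *p, struct proc_info_sketch *end,
                  unsigned int cpuid)
    {
        for (; p < end; p++)
            if ((cpuid & p->cpu_mask) == p->cpu_val)
                return p;
        return 0;                       /* unknown processor */
    }
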
+ __error_p:
+ #ifdef CONFIG_DEBUG_LL
+       adr     r0, str_p1
+       bl      printascii
+       mov     r0, r9
+       bl      printhex8
+       adr     r0, str_p2
+       bl      printascii
+       b       __error
+ str_p1:       .asciz  "\nError: unrecognized/unsupported processor variant (0x"
+ str_p2:       .asciz  ").\n"
+       .align
+ #endif
+ ENDPROC(__error_p)
+
+ __error:
+ #ifdef CONFIG_ARCH_RPC
+ /*
+  * Turn the screen red on an error - RiscPC only.
+  */
+       mov     r0, #0x02000000
+       mov     r3, #0x11
+       orr     r3, r3, r3, lsl #8
+       orr     r3, r3, r3, lsl #16
+       str     r3, [r0], #4
+       str     r3, [r0], #4
+       str     r3, [r0], #4
+       str     r3, [r0], #4
+ #endif
+ 1:    mov     r0, r0
+       b       1b
+ ENDPROC(__error)
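The two orr instructions build 0x11111111 from #0x11 before it is stored to screen memory at 0x02000000; the same byte-replication idiom in C (illustrative only):

    unsigned int v = 0x11;
    v |= v << 8;                        /* 0x00001111 */
    v |= v << 16;                       /* 0x11111111 */
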
index b44d21e1e344f16cdb5c228add7a78439b3631fd,c11dd14afc11c903b8b52b4e6dadc8b92c54cea4..767390449e0a8804598bcd923cf30076693d8c7b
@@@ -335,52 -244,121 +247,168 @@@ __create_page_tables
        mov     pc, lr
  ENDPROC(__create_page_tables)
        .ltorg
+ __enable_mmu_loc:
+       .long   .
+       .long   __enable_mmu
+       .long   __enable_mmu_end
+
+ #if defined(CONFIG_SMP)
+       __CPUINIT
+ ENTRY(secondary_startup)
+       /*
+        * Common entry point for secondary CPUs.
+        *
+        * Ensure that we're in SVC mode, and IRQs are disabled.  Look up
+        * the processor type - there is no need to check the machine type
+        * as it has already been validated by the primary processor.
+        */
+       setmode PSR_F_BIT | PSR_I_BIT | SVC_MODE, r9
+       mrc     p15, 0, r9, c0, c0              @ get processor id
+       bl      __lookup_processor_type
+       movs    r10, r5                         @ invalid processor?
+       moveq   r0, #'p'                        @ yes, error 'p'
+       beq     __error_p
+       /*
+        * Use the page tables supplied by __cpu_up.
+        */
+       adr     r4, __secondary_data
+       ldmia   r4, {r5, r7, r12}               @ address to jump to after
+       sub     r4, r4, r5                      @ mmu has been enabled
+       ldr     r4, [r7, r4]                    @ get secondary_data.pgdir
+       adr     lr, BSYM(__enable_mmu)          @ return address
+       mov     r13, r12                        @ __secondary_switched address
+  ARM( add     pc, r10, #PROCINFO_INITFUNC     ) @ initialise processor
+                                                 @ (return control reg)
+  THUMB(       add     r12, r10, #PROCINFO_INITFUNC    )
+  THUMB(       mov     pc, r12                         )
+ ENDPROC(secondary_startup)
+
+       /*
+        * r7  = &secondary_data
+        */
+ ENTRY(__secondary_switched)
+       ldr     sp, [r7, #4]                    @ get secondary_data.stack
+       mov     fp, #0
+       b       secondary_start_kernel
+ ENDPROC(__secondary_switched)
+
+       .type   __secondary_data, %object
+ __secondary_data:
+       .long   .
+       .long   secondary_data
+       .long   __secondary_switched
+ #endif /* defined(CONFIG_SMP) */
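secondary_startup reads two words out of secondary_data - pgdir at offset 0 and stack at offset 4 - through the physical alias derived from the `.long .` entry, since the MMU is still off at that point. The layout it relies on, in sketch form (the real definition lives in <asm/smp.h>):

    struct secondary_data_sketch {
        unsigned long pgdir;            /* physical address of the page tables (offset 0) */
        void *stack;                    /* initial stack for the secondary CPU (offset 4) */
    };

__cpu_up() on the booting CPU fills this structure in before it releases the secondary.
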
+ /*
+  * Set up common bits before finally enabling the MMU.  Essentially
+  * this is just loading the page table pointer and domain access
+  * registers.
+  *
+  *  r0  = cp#15 control register
+  *  r1  = machine ID
+  *  r2  = atags pointer
+  *  r4  = page table pointer
+  *  r9  = processor ID
+  *  r13 = *virtual* address to jump to upon completion
+  */
+ __enable_mmu:
+ #ifdef CONFIG_ALIGNMENT_TRAP
+       orr     r0, r0, #CR_A
+ #else
+       bic     r0, r0, #CR_A
+ #endif
+ #ifdef CONFIG_CPU_DCACHE_DISABLE
+       bic     r0, r0, #CR_C
+ #endif
+ #ifdef CONFIG_CPU_BPREDICT_DISABLE
+       bic     r0, r0, #CR_Z
+ #endif
+ #ifdef CONFIG_CPU_ICACHE_DISABLE
+       bic     r0, r0, #CR_I
+ #endif
+       mov     r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
+                     domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
+                     domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
+                     domain_val(DOMAIN_IO, DOMAIN_CLIENT))
+       mcr     p15, 0, r5, c3, c0, 0           @ load domain access register
+       mcr     p15, 0, r4, c2, c0, 0           @ load page table pointer
+       b       __turn_mmu_on
+ ENDPROC(__enable_mmu)
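domain_val() packs a 2-bit access type into the DACR field of the given domain number; the definitions are roughly as follows (a sketch - see <asm/domain.h> for the real values):

    #define DOMAIN_CLIENT   1           /* accesses checked against permissions */
    #define DOMAIN_MANAGER  3           /* accesses never checked */
    #define domain_val(dom, type)   ((type) << ((dom) * 2))

The mov above therefore grants manager access to the kernel, table and user domains, and client access to the I/O domain.
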
+ /*
+  * Enable the MMU.  This completely changes the structure of the visible
+  * memory space.  You will not be able to trace execution through this.
+  * If you have an enquiry about this, *please* check the linux-arm-kernel
+  * mailing list archives BEFORE sending another post to the list.
+  *
+  *  r0  = cp#15 control register
+  *  r1  = machine ID
+  *  r2  = atags pointer
+  *  r9  = processor ID
+  *  r13 = *virtual* address to jump to upon completion
+  *
+  * other registers depend on the function called upon completion
+  */
+       .align  5
+ __turn_mmu_on:
+       mov     r0, r0
+       mcr     p15, 0, r0, c1, c0, 0           @ write control reg
+       mrc     p15, 0, r3, c0, c0, 0           @ read id reg
+       mov     r3, r3
+       mov     r3, r13
+       mov     pc, r3
+ __enable_mmu_end:
+ ENDPROC(__turn_mmu_on)
  
 +#ifdef CONFIG_SMP_ON_UP
 +__fixup_smp:
 +      mov     r7, #0x00070000
 +      orr     r6, r7, #0xff000000     @ mask 0xff070000
 +      orr     r7, r7, #0x41000000     @ val 0x41070000
 +      and     r0, r9, r6
 +      teq     r0, r7                  @ ARM CPU and ARMv6/v7?
 +      bne     __fixup_smp_on_up       @ no, assume UP
 +
 +      orr     r6, r6, #0x0000ff00
 +      orr     r6, r6, #0x000000f0     @ mask 0xff07fff0
 +      orr     r7, r7, #0x0000b000
 +      orr     r7, r7, #0x00000020     @ val 0x4107b020
 +      and     r0, r9, r6
 +      teq     r0, r7                  @ ARM 11MPCore?
 +      moveq   pc, lr                  @ yes, assume SMP
 +
 +      mrc     p15, 0, r0, c0, c0, 5   @ read MPIDR
 +      tst     r0, #1 << 31
 +      movne   pc, lr                  @ bit 31 => SMP
 +
 +__fixup_smp_on_up:
 +      adr     r0, 1f
 +      ldmia   r0, {r3, r6, r7}
 +      sub     r3, r0, r3
 +      add     r6, r6, r3
 +      add     r7, r7, r3
 +2:    cmp     r6, r7
 +      ldmia   r6!, {r0, r4}
 +      strlo   r4, [r0, r3]
 +      blo     2b
 +      mov     pc, lr
 +ENDPROC(__fixup_smp)
 +
 +1:    .word   .
 +      .word   __smpalt_begin
 +      .word   __smpalt_end
 +
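The two mask/value tests in __fixup_smp, written out in C (illustrative; midr is the CP15 c0,c0,0 Main ID register and mpidr the c0,c0,5 Multiprocessor Affinity register read above):

    static int is_smp_sketch(unsigned int midr, unsigned int mpidr)
    {
        if ((midr & 0xff070000) != 0x41070000)
            return 0;                   /* not an ARM v6/v7 part: assume UP */
        if ((midr & 0xff07fff0) == 0x4107b020)
            return 1;                   /* ARM 11MPCore: assume SMP */
        return (mpidr >> 31) & 1;       /* MPIDR bit 31 set => SMP */
    }
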
 +      .pushsection .data
 +      .globl  smp_on_up
 +smp_on_up:
 +      ALT_SMP(.long   1)
 +      ALT_UP(.long    0)
 +      .popsection
 +
 +#endif
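Conceptually, the ALT_SMP()/ALT_UP() pair emits the SMP instruction into .text and records an (address, UP replacement) pair in the __smpalt section; on a uniprocessor, the loop in __fixup_smp_on_up overwrites each recorded location before the MMU is enabled. The same idea in C (a sketch with made-up names; delta stands for the runtime-minus-link-time offset the assembly computes):

    struct smpalt_sketch {
        unsigned long *addr;            /* link-time address of the SMP instruction */
        unsigned long insn;             /* UP replacement encoding */
    };

    static void fixup_smp_on_up_sketch(struct smpalt_sketch *p,
                                       struct smpalt_sketch *end, long delta)
    {
        for (; p < end; p++)
            *(unsigned long *)((char *)p->addr + delta) = p->insn;
    }
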
 +
  #include "head-common.S"
arch/arm/kernel/smp.c: Simple merge
arch/arm/kernel/vmlinux.lds.S: Simple merge
arch/arm/mm/proc-v6.S: Simple merge
arch/arm/mm/proc-v7.S: Simple merge