/*
 * PROVIDE32(x): on 64-bit builds the 32-bit compatibility symbol is
 * hidden behind an __unused__ prefix so it is not usefully exported;
 * on 32-bit builds it expands to a plain PROVIDE(x).
 * NOTE(review): the #ifdef CONFIG_PPC64 / #else / #endif lines that
 * select between these two definitions are elided from this view.
 */
2 #define PROVIDE32(x) PROVIDE(__unused__##x)
4 #define PROVIDE32(x) PROVIDE(x)
7 #include <asm-generic/vmlinux.lds.h>
/*
 * Program headers (PHDRS): one RWX PT_LOAD segment holding the whole
 * kernel image, plus two PT_NOTE segments.  "dummy" exists only for
 * the binutils < 2.18 strip workaround described in the comment below.
 */
13 kernel PT_LOAD FLAGS(7); /* RWX */
14 notes PT_NOTE FLAGS(0);
15 dummy PT_NOTE FLAGS(0);
17 /* binutils < 2.18 has a bug that makes it misbehave when taking an
18 ELF file with all segments at load address 0 as input. This
19 happens when running "strip" on vmlinux, because of the AT() magic
20 in this linker script. People using GCC >= 4.2 won't run into
21 this problem, because the "build-id" support will put some data
22 into the "notes" segment (at a non-zero load address).
24 To work around this, we force some data into both the "dummy"
25 segment and the kernel segment, so the dummy segment will get a
26 non-zero load address. It's not enough to always create the
27 "notes" segment, since if nothing gets assigned to it, its load
28 address will be zero. */
/*
 * Target BFD architecture: powerpc:common64 for 64-bit kernels,
 * powerpc:common for 32-bit (the selecting preprocessor conditionals
 * are elided from this view).
 */
32 OUTPUT_ARCH(powerpc:common64)
35 OUTPUT_ARCH(powerpc:common)
/*
 * 32-bit only: alias "jiffies" to the low word of the 64-bit
 * jiffies_64 counter.  The +4 byte offset selects the least
 * significant half on a big-endian layout — NOTE(review): assumes
 * big-endian powerpc; confirm for this configuration.
 */
36 jiffies = jiffies_64 + 4;
40 /* Sections to be discarded. */
50 * Text, read only data and other permanent read-only sections
/*
 * Kernel text.  Every output section in this script uses
 * AT(ADDR(sect) - LOAD_OFFSET): the load (physical) address is the
 * link (virtual) address minus LOAD_OFFSET.
 */
54 .text : AT(ADDR(.text) - LOAD_OFFSET) {
58 /* careful! __ftr_alt_* sections need to be close to .text */
59 *(.text .fixup __ftr_alt_* .ref.text)
70 #endif /* CONFIG_PPC32 */
/* End-of-text marker; PROVIDE32 => emitted usefully on 32-bit only. */
76 PROVIDE32 (etext = .);
81 /* Exception & bug tables */
/* __start/__stop___ex_table bracket the fault-fixup entries. */
82 __ex_table : AT(ADDR(__ex_table) - LOAD_OFFSET) {
83 __start___ex_table = .;
85 __stop___ex_table = .;
90 /* The dummy segment contents for the bug workaround mentioned above
/* Forces non-zero contents into the "dummy" PT_NOTE segment so strip
 * with binutils < 2.18 behaves (see workaround comment above). */
92 .dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
99 * Init sections discarded at runtime
/* Page-align the init region so it can be freed wholesale at runtime. */
101 . = ALIGN(PAGE_SIZE);
104 .init.text : AT(ADDR(.init.text) - LOAD_OFFSET) {
110 /* .exit.text is discarded at runtime, not link time,
111 * to deal with references from __bug_table
113 .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
/*
 * Init-only data, including the vtop/ptov translation tables and
 * (on legacy iSeries) the flattened-device-tree strings, each
 * bracketed by begin/end marker symbols.
 */
117 .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
119 __vtop_table_begin = .;
121 __vtop_table_end = .;
122 __ptov_table_begin = .;
124 __ptov_table_end = .;
125 #ifdef CONFIG_PPC_ISERIES
126 __dt_strings_start = .;
128 __dt_strings_end = .;
/* Kernel command-line parameter setup entries (__setup/early_param). */
133 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
/* Ordered initcall function-pointer tables, run during boot. */
139 .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
140 __initcall_start = .;
145 .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
146 __con_initcall_start = .;
147 *(.con_initcall.init)
148 __con_initcall_end = .;
/*
 * Self-patching tables: CPU feature, MMU feature, lwsync, and firmware
 * feature fixups.  Each is a start/stop-delimited array of fixup
 * entries applied once at boot, then discarded with the init region.
 */
154 __ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
155 __start___ftr_fixup = .;
157 __stop___ftr_fixup = .;
160 __mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
161 __start___mmu_ftr_fixup = .;
163 __stop___mmu_ftr_fixup = .;
166 __lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
167 __start___lwsync_fixup = .;
169 __stop___lwsync_fixup = .;
173 __fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
174 __start___fw_ftr_fixup = .;
176 __stop___fw_ftr_fixup = .;
/* Built-in initramfs image, page aligned, freed with the init region. */
179 #ifdef CONFIG_BLK_DEV_INITRD
180 . = ALIGN(PAGE_SIZE);
181 .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
182 __initramfs_start = .;
/* Table of machdep platform descriptors, probed at boot. */
190 .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
191 __machine_desc_start = . ;
193 __machine_desc_end = . ;
/*
 * CONFIG_RELOCATABLE: keep the dynamic-linking sections so the kernel
 * can relocate itself at boot; __rela_dyn_start marks the relocation
 * entries the early relocation code walks.
 */
195 #ifdef CONFIG_RELOCATABLE
197 .dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET) { *(.dynsym) }
198 .dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
199 .dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
204 .hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
205 .interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
206 .rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
208 __rela_dyn_start = .;
213 /* freed after init ends here */
214 . = ALIGN(PAGE_SIZE);
218 * And now the various read/write data
221 . = ALIGN(PAGE_SIZE);
/*
 * Two .data definitions — NOTE(review): presumably the 32-bit and
 * 64-bit variants selected by an elided #ifdef CONFIG_PPC32/#else;
 * confirm against the full file.
 */
225 .data : AT(ADDR(.data) - LOAD_OFFSET) {
231 .data : AT(ADDR(.data) - LOAD_OFFSET) {
/* .opd: NOTE(review): on 64-bit this holds ELFv1 function descriptors;
 * .got is the global offset table — both PPC64-only, selecting
 * conditional elided from this view. */
238 .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
242 .got : AT(ADDR(.got) - LOAD_OFFSET) {
249 . = ALIGN(PAGE_SIZE);
/* End-of-initialized-data marker; emitted usefully on 32-bit only. */
251 PROVIDE32 (edata = .);
253 /* The initial task and kernel stack */
259 .data.init_task : AT(ADDR(.data.init_task) - LOAD_OFFSET) {
/* Alignment-sensitive data variants: page-, cacheline-, and
 * read-mostly-grouped, each preceded by the matching ALIGN(). */
263 . = ALIGN(PAGE_SIZE);
264 .data.page_aligned : AT(ADDR(.data.page_aligned) - LOAD_OFFSET) {
265 *(.data.page_aligned)
268 . = ALIGN(L1_CACHE_BYTES);
269 .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
270 *(.data.cacheline_aligned)
273 . = ALIGN(L1_CACHE_BYTES);
274 .data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET) {
/* Data excluded from hibernation save/restore. */
278 . = ALIGN(PAGE_SIZE);
279 .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
282 . = ALIGN(PAGE_SIZE);
287 * And finally the bss
/* Zero-initialized data, page aligned at both ends. */
290 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
299 . = ALIGN(PAGE_SIZE);