/*
 * vmlinux.lds.S — kernel image linker script (ia64).
 * NOTE(review): web-scrape artifacts (filename/size banner and a fused run
 * of line numbers) removed from the top of the file.
 */
  1. #include <asm/cache.h>
  2. #include <asm/ptrace.h>
  3. #include <asm/pgtable.h>
  4. #include <asm-generic/vmlinux.lds.h>
  5. OUTPUT_FORMAT("elf64-ia64-little")
  6. OUTPUT_ARCH(ia64)
  7. ENTRY(phys_start)
  8. jiffies = jiffies_64;
  9. PHDRS {
  10. code PT_LOAD;
  11. percpu PT_LOAD;
  12. data PT_LOAD;
  13. note PT_NOTE;
  14. unwind 0x70000001; /* PT_IA_64_UNWIND, but ld doesn't match the name */
  15. }
  16. SECTIONS {
  17. /*
  18. * unwind exit sections must be discarded before
  19. * the rest of the sections get included.
  20. */
  21. /DISCARD/ : {
  22. *(.IA_64.unwind.exit.text)
  23. *(.IA_64.unwind_info.exit.text)
  24. *(.comment)
  25. *(.note)
  26. }
  27. v = PAGE_OFFSET; /* this symbol is here to make debugging easier... */
  28. phys_start = _start - LOAD_OFFSET;
  29. code : {
  30. } :code
  31. . = KERNEL_START;
  32. _text = .;
  33. _stext = .;
  34. .text : AT(ADDR(.text) - LOAD_OFFSET) {
  35. __start_ivt_text = .;
  36. *(.text..ivt)
  37. __end_ivt_text = .;
  38. TEXT_TEXT
  39. SCHED_TEXT
  40. LOCK_TEXT
  41. KPROBES_TEXT
  42. *(.gnu.linkonce.t*)
  43. }
  44. .text2 : AT(ADDR(.text2) - LOAD_OFFSET) {
  45. *(.text2)
  46. }
  47. #ifdef CONFIG_SMP
  48. .text..lock : AT(ADDR(.text..lock) - LOAD_OFFSET) {
  49. *(.text..lock)
  50. }
  51. #endif
  52. _etext = .;
  53. /*
  54. * Read-only data
  55. */
  56. NOTES :code :note /* put .notes in text and mark in PT_NOTE */
  57. code_continues : {
  58. } : code /* switch back to regular program... */
  59. EXCEPTION_TABLE(16)
  60. /* MCA table */
  61. . = ALIGN(16);
  62. __mca_table : AT(ADDR(__mca_table) - LOAD_OFFSET) {
  63. __start___mca_table = .;
  64. *(__mca_table)
  65. __stop___mca_table = .;
  66. }
  67. .data..patch.phys_stack_reg : AT(ADDR(.data..patch.phys_stack_reg) - LOAD_OFFSET) {
  68. __start___phys_stack_reg_patchlist = .;
  69. *(.data..patch.phys_stack_reg)
  70. __end___phys_stack_reg_patchlist = .;
  71. }
  72. /*
  73. * Global data
  74. */
  75. _data = .;
  76. /* Unwind info & table: */
  77. . = ALIGN(8);
  78. .IA_64.unwind_info : AT(ADDR(.IA_64.unwind_info) - LOAD_OFFSET) {
  79. *(.IA_64.unwind_info*)
  80. }
  81. .IA_64.unwind : AT(ADDR(.IA_64.unwind) - LOAD_OFFSET) {
  82. __start_unwind = .;
  83. *(.IA_64.unwind*)
  84. __end_unwind = .;
  85. } :code :unwind
  86. code_continues2 : {
  87. } : code
  88. RODATA
  89. .opd : AT(ADDR(.opd) - LOAD_OFFSET) {
  90. *(.opd)
  91. }
  92. /*
  93. * Initialization code and data:
  94. */
  95. . = ALIGN(PAGE_SIZE);
  96. __init_begin = .;
  97. INIT_TEXT_SECTION(PAGE_SIZE)
  98. INIT_DATA_SECTION(16)
  99. .data..patch.vtop : AT(ADDR(.data..patch.vtop) - LOAD_OFFSET) {
  100. __start___vtop_patchlist = .;
  101. *(.data..patch.vtop)
  102. __end___vtop_patchlist = .;
  103. }
  104. .data..patch.rse : AT(ADDR(.data..patch.rse) - LOAD_OFFSET) {
  105. __start___rse_patchlist = .;
  106. *(.data..patch.rse)
  107. __end___rse_patchlist = .;
  108. }
  109. .data..patch.mckinley_e9 : AT(ADDR(.data..patch.mckinley_e9) - LOAD_OFFSET) {
  110. __start___mckinley_e9_bundles = .;
  111. *(.data..patch.mckinley_e9)
  112. __end___mckinley_e9_bundles = .;
  113. }
  114. #if defined(CONFIG_IA64_GENERIC)
  115. /* Machine Vector */
  116. . = ALIGN(16);
  117. .machvec : AT(ADDR(.machvec) - LOAD_OFFSET) {
  118. machvec_start = .;
  119. *(.machvec)
  120. machvec_end = .;
  121. }
  122. #endif
  123. #ifdef CONFIG_SMP
  124. . = ALIGN(PERCPU_PAGE_SIZE);
  125. __cpu0_per_cpu = .;
  126. . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
  127. #endif
  128. . = ALIGN(PAGE_SIZE);
  129. __init_end = .;
  130. .data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
  131. PAGE_ALIGNED_DATA(PAGE_SIZE)
  132. . = ALIGN(PAGE_SIZE);
  133. __start_gate_section = .;
  134. *(.data..gate)
  135. __stop_gate_section = .;
  136. }
  137. /*
  138. * make sure the gate page doesn't expose
  139. * kernel data
  140. */
  141. . = ALIGN(PAGE_SIZE);
  142. /* Per-cpu data: */
  143. . = ALIGN(PERCPU_PAGE_SIZE);
  144. PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
  145. __phys_per_cpu_start = __per_cpu_load;
  146. /*
  147. * ensure percpu data fits
  148. * into percpu page size
  149. */
  150. . = __phys_per_cpu_start + PERCPU_PAGE_SIZE;
  151. data : {
  152. } :data
  153. .data : AT(ADDR(.data) - LOAD_OFFSET) {
  154. _sdata = .;
  155. INIT_TASK_DATA(PAGE_SIZE)
  156. CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
  157. READ_MOSTLY_DATA(SMP_CACHE_BYTES)
  158. DATA_DATA
  159. *(.data1)
  160. *(.gnu.linkonce.d*)
  161. CONSTRUCTORS
  162. }
  163. . = ALIGN(16); /* gp must be 16-byte aligned for exc. table */
  164. .got : AT(ADDR(.got) - LOAD_OFFSET) {
  165. *(.got.plt)
  166. *(.got)
  167. }
  168. __gp = ADDR(.got) + 0x200000;
  169. /*
  170. * We want the small data sections together,
  171. * so single-instruction offsets can access
  172. * them all, and initialized data all before
  173. * uninitialized, so we can shorten the
  174. * on-disk segment size.
  175. */
  176. .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
  177. *(.sdata)
  178. *(.sdata1)
  179. *(.srdata)
  180. }
  181. _edata = .;
  182. BSS_SECTION(0, 0, 0)
  183. _end = .;
  184. code : {
  185. } :code
  186. STABS_DEBUG
  187. DWARF_DEBUG
  188. /* Default discards */
  189. DISCARDS
  190. }