/*
 *
 * Copyright (C) 1995 Linus Torvalds
 *
 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/memory_hotplug.h>
#include <linux/initrd.h>
#include <linux/cpumask.h>
#include <linux/gfp.h>

#include <asm/asm.h>
#include <asm/bios_ebda.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/bugs.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/olpc_ofw.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>
#include <asm/paravirt.h>
#include <asm/setup.h>
#include <asm/cacheflush.h>
#include <asm/page_types.h>
#include <asm/init.h>

#include "mm_internal.h"

unsigned long highstart_pfn, highend_pfn;

static noinline int do_test_wp_bit(void);

bool __read_mostly __vmalloc_start_set = false;

/*
 * Creates a middle page table and puts a pointer to it in the
 * given global directory entry. This only returns the gd entry
 * in non-PAE compilation mode, since the middle layer is folded.
 */
static pmd_t * __init one_md_table_init(pgd_t *pgd)
{
        pud_t *pud;
        pmd_t *pmd_table;

#ifdef CONFIG_X86_PAE
        if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
                pmd_table = (pmd_t *)alloc_low_page();
                paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
                set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
                pud = pud_offset(pgd, 0);
                BUG_ON(pmd_table != pmd_offset(pud, 0));

                return pmd_table;
        }
#endif
        pud = pud_offset(pgd, 0);
        pmd_table = pmd_offset(pud, 0);

        return pmd_table;
}

/*
 * Create a page table and place a pointer to it in a middle page
 * directory entry:
 */
static pte_t * __init one_page_table_init(pmd_t *pmd)
{
        if (!(pmd_val(*pmd) & _PAGE_PRESENT)) {
                pte_t *page_table = (pte_t *)alloc_low_page();

                paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
                BUG_ON(page_table != pte_offset_kernel(pmd, 0));
        }

        return pte_offset_kernel(pmd, 0);
}
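
/*
 * Make sure the page-table levels covering @vaddr exist in
 * swapper_pg_dir, allocating any missing pmd/pte pages, and return a
 * pointer to the entry for @vaddr:
 */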
pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        int pgd_idx = pgd_index(vaddr);
        int pmd_idx = pmd_index(vaddr);

        return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx;
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        int pte_idx = pte_index(vaddr);
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return one_page_table_init(pmd) + pte_idx;
}
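
/*
 * Count how many pte pages page_table_range_init() will need for the
 * kmap fixmap range, so that they can be taken from a single
 * contiguous low-memory allocation:
 */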
static unsigned long __init
page_table_range_init_count(unsigned long start, unsigned long end)
{
        unsigned long count = 0;
#ifdef CONFIG_HIGHMEM
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;
        int pgd_idx, pmd_idx;
        unsigned long vaddr;

        if (pmd_idx_kmap_begin == pmd_idx_kmap_end)
                return 0;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd_idx++) {
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd_idx++) {
                        if ((vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin &&
                            (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end)
                                count++;
                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
#endif
        return count;
}

static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
                                           unsigned long vaddr, pte_t *lastpte,
                                           void **adr)
{
#ifdef CONFIG_HIGHMEM
        /*
         * Something (early fixmap) may already have put a pte
         * page here, which causes the page table allocation
         * to become nonlinear. Attempt to fix it, and if it
         * is still nonlinear then we have to bug.
         */
        int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT;
        int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT;

        if (pmd_idx_kmap_begin != pmd_idx_kmap_end
            && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin
            && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end) {
                pte_t *newpte;
                int i;

                BUG_ON(after_bootmem);
                newpte = *adr;
                for (i = 0; i < PTRS_PER_PTE; i++)
                        set_pte(newpte + i, pte[i]);
                *adr = (void *)(((unsigned long)(*adr)) + PAGE_SIZE);

                paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT);
                set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE));
                BUG_ON(newpte != pte_offset_kernel(pmd, 0));
                __flush_tlb_all();

                paravirt_release_pte(__pa(pte) >> PAGE_SHIFT);
                pte = newpte;
        }
        BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1)
               && vaddr > fix_to_virt(FIX_KMAP_END)
               && lastpte && lastpte + PTRS_PER_PTE != pte);
#endif
        return pte;
}

/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, wherever page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguously in physical memory,
 * so we can cache the location of the first one and move around
 * without checking the pgd every time.
 */
static void __init
page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
{
        int pgd_idx, pmd_idx;
        unsigned long vaddr;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte = NULL;
        unsigned long count = page_table_range_init_count(start, end);
        void *adr = NULL;

        if (count)
                adr = alloc_low_pages(count);

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pmd_idx = pmd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);
                pmd = pmd + pmd_index(vaddr);
                for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
                                                        pmd++, pmd_idx++) {
                        pte = page_table_kmap_check(one_page_table_init(pmd),
                                                    pmd, vaddr, pte, &adr);

                        vaddr += PMD_SIZE;
                }
                pmd_idx = 0;
        }
}
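
/*
 * Return 1 if @addr lies within the kernel image (from _text through
 * the end of the init sections), which must stay mapped executable:
 */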
static inline int is_kernel_text(unsigned long addr)
{
        if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
                return 1;
        return 0;
}

/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET:
 */
unsigned long __init
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
        unsigned long last_map_addr = end;
        unsigned long start_pfn, end_pfn;
        pgd_t *pgd_base = swapper_pg_dir;
        int pgd_idx, pmd_idx, pte_ofs;
        unsigned long pfn;
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        unsigned pages_2m, pages_4k;
        int mapping_iter;

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

        /*
         * First iteration will setup identity mapping using large/small pages
         * based on use_pse, with other attributes same as set by
         * the early code in head_32.S.
         *
         * Second iteration will setup the appropriate attributes (NX, GLOBAL..)
         * as desired for the kernel identity mapping.
         *
         * This two pass mechanism conforms to the TLB app note which says:
         *
         *     "Software should not write to a paging-structure entry in a way
         *      that would change, for any linear address, both the page size
         *      and either the page frame or attributes."
         */
        mapping_iter = 1;

        if (!cpu_has_pse)
                use_pse = 0;

repeat:
        pages_2m = pages_4k = 0;
        pfn = start_pfn;
        pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
        pgd = pgd_base + pgd_idx;
        for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
                pmd = one_md_table_init(pgd);

                if (pfn >= end_pfn)
                        continue;
#ifdef CONFIG_X86_PAE
                pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                pmd += pmd_idx;
#else
                pmd_idx = 0;
#endif
                for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
                     pmd++, pmd_idx++) {
                        unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;

                        /*
                         * Map with big pages if possible, otherwise
                         * create normal page tables:
                         */
                        if (use_pse) {
                                unsigned int addr2;
                                pgprot_t prot = PAGE_KERNEL_LARGE;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute + _PAGE_PSE.
                                 */
                                pgprot_t init_prot =
                                        __pgprot(PTE_IDENT_ATTR |
                                                 _PAGE_PSE);

                                pfn &= PMD_MASK >> PAGE_SHIFT;
                                addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
                                        PAGE_OFFSET + PAGE_SIZE-1;

                                if (is_kernel_text(addr) ||
                                    is_kernel_text(addr2))
                                        prot = PAGE_KERNEL_LARGE_EXEC;

                                pages_2m++;
                                if (mapping_iter == 1)
                                        set_pmd(pmd, pfn_pmd(pfn, init_prot));
                                else
                                        set_pmd(pmd, pfn_pmd(pfn, prot));

                                pfn += PTRS_PER_PTE;
                                continue;
                        }
                        pte = one_page_table_init(pmd);

                        pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
                        pte += pte_ofs;
                        for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
                             pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
                                pgprot_t prot = PAGE_KERNEL;
                                /*
                                 * first pass will use the same initial
                                 * identity mapping attribute.
                                 */
                                pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);

                                if (is_kernel_text(addr))
                                        prot = PAGE_KERNEL_EXEC;

                                pages_4k++;
                                if (mapping_iter == 1) {
                                        set_pte(pte, pfn_pte(pfn, init_prot));
                                        last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE;
                                } else
                                        set_pte(pte, pfn_pte(pfn, prot));
                        }
                }
        }
        if (mapping_iter == 1) {
                /*
                 * update direct mapping page count only in the first
                 * iteration.
                 */
                update_page_count(PG_LEVEL_2M, pages_2m);
                update_page_count(PG_LEVEL_4K, pages_4k);

                /*
                 * local global flush tlb, which will flush the previous
                 * mappings present in both small and large page TLB's.
                 */
                __flush_tlb_all();

                /*
                 * Second iteration will set the actual desired PTE attributes.
                 */
                mapping_iter = 2;
                goto repeat;
        }
        return last_map_addr;
}
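
/*
 * kmap_pte caches the pte backing the first kmap fixmap slot, and
 * kmap_prot the protection used for it; both are set up by kmap_init():
 */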
pte_t *kmap_pte;
pgprot_t kmap_prot;

static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr)
{
        return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr),
                        vaddr), vaddr), vaddr);
}

static void __init kmap_init(void)
{
        unsigned long kmap_vstart;

        /*
         * Cache the first kmap pte:
         */
        kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
        kmap_pte = kmap_get_fixmap_pte(kmap_vstart);

        kmap_prot = PAGE_KERNEL;
}

#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        unsigned long vaddr;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
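
/*
 * Hand every valid, free highmem page frame in [start_pfn, end_pfn)
 * on node @nid over to the buddy allocator:
 */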
void __init add_highpages_with_active_regions(int nid,
                         unsigned long start_pfn, unsigned long end_pfn)
{
        phys_addr_t start, end;
        u64 i;

        for_each_free_mem_range(i, nid, MEMBLOCK_NONE, &start, &end, NULL) {
                unsigned long pfn = clamp_t(unsigned long, PFN_UP(start),
                                            start_pfn, end_pfn);
                unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end),
                                              start_pfn, end_pfn);
                for ( ; pfn < e_pfn; pfn++)
                        if (pfn_valid(pfn))
                                free_highmem_page(pfn_to_page(pfn));
        }
}
#else
static inline void permanent_kmaps_init(pgd_t *pgd_base)
{
}
#endif /* CONFIG_HIGHMEM */

void __init native_pagetable_init(void)
{
        unsigned long pfn, va;
        pgd_t *pgd, *base = swapper_pg_dir;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        /*
         * Remove any mappings which extend past the end of physical
         * memory from the boot time page table.
         * In virtual address space, there should be at least two pages
         * between VMALLOC_END and the pkmap or fixmap area, by the
         * definition of VMALLOC_END, and max_low_pfn corresponds to the
         * physical address of VMALLOC_END. If the initial memory mapping
         * did its job, we should either find a pte in use near
         * max_low_pfn or hit a pmd that is not present.
         */
        for (pfn = max_low_pfn; pfn < 1<<(32-PAGE_SHIFT); pfn++) {
                va = PAGE_OFFSET + (pfn<<PAGE_SHIFT);
                pgd = base + pgd_index(va);
                if (!pgd_present(*pgd))
                        break;

                pud = pud_offset(pgd, va);
                pmd = pmd_offset(pud, va);
                if (!pmd_present(*pmd))
                        break;

                /* should not be large page here */
                if (pmd_large(*pmd)) {
                        pr_warn("try to clear pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx, but pmd is big page and is not using pte !\n",
                                pfn, pmd, __pa(pmd));
                        BUG_ON(1);
                }

                pte = pte_offset_kernel(pmd, va);
                if (!pte_present(*pte))
                        break;

                printk(KERN_DEBUG "clearing pte for ram above max_low_pfn: pfn: %lx pmd: %p pmd phys: %lx pte: %p pte phys: %lx\n",
                        pfn, pmd, __pa(pmd), pte, __pa(pte));
                pte_clear(NULL, va, pte);
        }
        paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT);
        paging_init();
}

/*
 * Build a proper pagetable for the kernel mappings. Up until this
 * point, we've been running on some set of pagetables constructed by
 * the boot process.
 *
 * If we're booting on native hardware, this will be a pagetable
 * constructed in arch/x86/kernel/head_32.S. The root of the
 * pagetable will be swapper_pg_dir.
 *
 * If we're booting paravirtualized under a hypervisor, then there are
 * more options: we may already be running PAE, and the pagetable may
 * or may not be based in swapper_pg_dir. In any case,
 * paravirt_pagetable_init() will set up swapper_pg_dir
 * appropriately for the rest of the initialization to work.
 *
 * In general, pagetable_init() assumes that the pagetable may already
 * be partially populated, and so it avoids stomping on any existing
 * mappings.
 */
void __init early_ioremap_page_table_range_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;
        unsigned long vaddr, end;

        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        early_ioremap_reset();
}

static void __init pagetable_init(void)
{
        pgd_t *pgd_base = swapper_pg_dir;

        permanent_kmaps_init(pgd_base);
}
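
/*
 * Bits that may be set in a pte; _PAGE_NX and _PAGE_GLOBAL start out
 * masked off here and are only added back once CPU feature detection
 * confirms the processor supports them:
 */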
pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL);
EXPORT_SYMBOL_GPL(__supported_pte_mask);

/* user-defined highmem size */
static unsigned int highmem_pages = -1;

/*
 * highmem=size forces highmem to be exactly 'size' bytes.
 * This works even on boxes that have no highmem otherwise.
 * This also works to reduce highmem size on bigger boxes.
 */
static int __init parse_highmem(char *arg)
{
        if (!arg)
                return -EINVAL;

        highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
        return 0;
}
early_param("highmem", parse_highmem);

#define MSG_HIGHMEM_TOO_BIG \
        "highmem size (%luMB) is bigger than pages available (%luMB)!\n"

#define MSG_LOWMEM_TOO_SMALL \
        "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n"
/*
 * All of RAM fits into lowmem - but if user wants highmem
 * artificially via the highmem=x boot parameter then create
 * it:
 */
static void __init lowmem_pfn_init(void)
{
        /* max_low_pfn is 0, we already have early_res support */
        max_low_pfn = max_pfn;

        if (highmem_pages == -1)
                highmem_pages = 0;
#ifdef CONFIG_HIGHMEM
        if (highmem_pages >= max_pfn) {
                printk(KERN_ERR MSG_HIGHMEM_TOO_BIG,
                        pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
                highmem_pages = 0;
        }
        if (highmem_pages) {
                if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) {
                        printk(KERN_ERR MSG_LOWMEM_TOO_SMALL,
                                pages_to_mb(highmem_pages));
                        highmem_pages = 0;
                }
                max_low_pfn -= highmem_pages;
        }
#else
        if (highmem_pages)
                printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
#endif
}

#define MSG_HIGHMEM_TOO_SMALL \
        "only %luMB highmem pages available, ignoring highmem size of %luMB!\n"

#define MSG_HIGHMEM_TRIMMED \
        "Warning: only 4GB will be used. Use a HIGHMEM64G enabled kernel!\n"
/*
 * We have more RAM than fits into lowmem - we try to put it into
 * highmem, also taking the highmem=x boot parameter into account:
 */
static void __init highmem_pfn_init(void)
{
        max_low_pfn = MAXMEM_PFN;

        if (highmem_pages == -1)
                highmem_pages = max_pfn - MAXMEM_PFN;

        if (highmem_pages + MAXMEM_PFN < max_pfn)
                max_pfn = MAXMEM_PFN + highmem_pages;

        if (highmem_pages + MAXMEM_PFN > max_pfn) {
                printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL,
                        pages_to_mb(max_pfn - MAXMEM_PFN),
                        pages_to_mb(highmem_pages));
                highmem_pages = 0;
        }
#ifndef CONFIG_HIGHMEM
        /* Maximum memory usable is what is directly addressable */
        printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20);
        if (max_pfn > MAX_NONPAE_PFN)
                printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n");
        else
                printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
        max_pfn = MAXMEM_PFN;
#else /* !CONFIG_HIGHMEM */
#ifndef CONFIG_HIGHMEM64G
        if (max_pfn > MAX_NONPAE_PFN) {
                max_pfn = MAX_NONPAE_PFN;
                printk(KERN_WARNING MSG_HIGHMEM_TRIMMED);
        }
#endif /* !CONFIG_HIGHMEM64G */
#endif /* !CONFIG_HIGHMEM */
}

/*
 * Determine low and high memory ranges:
 */
void __init find_low_pfn_range(void)
{
        /* it could update max_pfn */
        if (max_pfn <= MAXMEM_PFN)
                lowmem_pfn_init();
        else
                highmem_pfn_init();
}

#ifndef CONFIG_NEED_MULTIPLE_NODES
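/*
 * Non-NUMA fallback: register all memory with memblock/sparsemem and
 * work out the highmem range for the single node:
 */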
void __init initmem_init(void)
{
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > max_low_pfn)
                highstart_pfn = max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
                pages_to_mb(highend_pfn - highstart_pfn));
        high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
#endif

        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
        sparse_memory_present_with_active_regions(0);

#ifdef CONFIG_FLATMEM
        max_mapnr = IS_ENABLED(CONFIG_HIGHMEM) ? highend_pfn : max_low_pfn;
#endif
        __vmalloc_start_set = true;

        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(max_low_pfn));

        setup_bootmem_allocator();
}
#endif /* !CONFIG_NEED_MULTIPLE_NODES */

void __init setup_bootmem_allocator(void)
{
        printk(KERN_INFO "  mapped low ram: 0 - %08lx\n",
                 max_pfn_mapped<<PAGE_SHIFT);
        printk(KERN_INFO "  low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT);
}

/*
 * paging_init() sets up the page tables - note that the first 8MB are
 * already mapped by head.S.
 *
 * This routine also unmaps the page at virtual kernel address 0, so
 * that we can trap those pesky NULL-reference errors in the kernel.
 */
void __init paging_init(void)
{
        pagetable_init();

        __flush_tlb_all();

        kmap_init();

        /*
         * NOTE: at this point the bootmem allocator is fully available.
         */
        olpc_dt_build_devicetree();
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        zone_sizes_init();
}

/*
 * Test if the WP bit works in supervisor mode. It isn't supported on 386's
 * and also on some strange 486's. All 586+'s are OK. This used to involve
 * black magic jumps to work around some nasty CPU bugs, but fortunately the
 * switch to using exceptions got rid of all that.
 */
static void __init test_wp_bit(void)
{
        printk(KERN_INFO
                "Checking if this processor honours the WP bit even in supervisor mode...");

        /* Any page-aligned address will do, the test is non-destructive */
        __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_KERNEL_RO);
        boot_cpu_data.wp_works_ok = do_test_wp_bit();
        clear_fixmap(FIX_WP_TEST);

        if (!boot_cpu_data.wp_works_ok) {
                printk(KERN_CONT "No.\n");
                panic("Linux doesn't support CPUs with broken WP.");
        } else {
                printk(KERN_CONT "Ok.\n");
        }
}
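
/*
 * Final arch-specific memory setup: release boot-time memory to the
 * buddy allocator, report the virtual memory layout and sanity-check
 * its boundaries:
 */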
void __init mem_init(void)
{
        pci_iommu_alloc();

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif
        /*
         * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to
         * be done before free_all_bootmem(). Memblock uses free low memory
         * for temporary data (see find_range_array()) and for this purpose
         * can use pages that were already passed to the buddy allocator,
         * and hence are marked as not accessible in the page tables when
         * compiled with CONFIG_DEBUG_PAGEALLOC. Otherwise the order of
         * initialization is not important here.
         */
        set_highmem_pages_init();

        /* this will put all low memory onto the freelists */
        free_all_bootmem();

        after_bootmem = 1;

        mem_init_print_info(NULL);
        printk(KERN_INFO "virtual kernel memory layout:\n"
                "    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
                "    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
                "    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB)\n"
                "      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
                "      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
                FIXADDR_START, FIXADDR_TOP,
                (FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
                PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
                (LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

                VMALLOC_START, VMALLOC_END,
                (VMALLOC_END - VMALLOC_START) >> 20,

                (unsigned long)__va(0), (unsigned long)high_memory,
                ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

                (unsigned long)&__init_begin, (unsigned long)&__init_end,
                ((unsigned long)&__init_end -
                 (unsigned long)&__init_begin) >> 10,

                (unsigned long)&_etext, (unsigned long)&_edata,
                ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

                (unsigned long)&_text, (unsigned long)&_etext,
                ((unsigned long)&_etext - (unsigned long)&_text) >> 10);

        /*
         * Check boundaries twice: Some fundamental inconsistencies can
         * be detected at build time already.
         */
#define __FIXADDR_TOP (-PAGE_SIZE)
#ifdef CONFIG_HIGHMEM
        BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
#define high_memory (-128UL << 20)
        BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END);
#undef high_memory
#undef __FIXADDR_TOP
#ifdef CONFIG_RANDOMIZE_BASE
        BUILD_BUG_ON(CONFIG_RANDOMIZE_BASE_MAX_OFFSET > KERNEL_IMAGE_SIZE);
#endif

#ifdef CONFIG_HIGHMEM
        BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
        BUG_ON(VMALLOC_END > PKMAP_BASE);
#endif
        BUG_ON(VMALLOC_START >= VMALLOC_END);
        BUG_ON((unsigned long)high_memory > VMALLOC_START);

        if (boot_cpu_data.wp_works_ok < 0)
                test_wp_bit();
}

#ifdef CONFIG_MEMORY_HOTPLUG
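/*
 * Hot-add @size bytes of memory starting at physical address @start to
 * node @nid; the pages land in the zone chosen by zone_for_memory()
 * (ZONE_HIGHMEM unless overridden):
 */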
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
        struct pglist_data *pgdata = NODE_DATA(nid);
        struct zone *zone = pgdata->node_zones +
                zone_for_memory(nid, start, size, ZONE_HIGHMEM, for_device);
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(nid, zone, start_pfn, nr_pages);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;

        zone = page_zone(pfn_to_page(start_pfn));
        return __remove_pages(zone, start_pfn, nr_pages);
}
#endif
#endif

/*
 * This function cannot be __init, since exceptions don't work in that
 * section. Put this after the callers, so that it cannot be inlined.
 */
static noinline int do_test_wp_bit(void)
{
        char tmp_reg;
        int flag;

        __asm__ __volatile__(
                " movb %0, %1 \n"
                "1: movb %1, %0 \n"
                " xorl %2, %2 \n"
                "2: \n"
                _ASM_EXTABLE(1b,2b)
                :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
                 "=q" (tmp_reg),
                 "=r" (flag)
                :"2" (1)
                :"memory");

        return flag;
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly __read_mostly;
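
/*
 * Make the kernel text mapping read-write again; this is a no-op
 * until mark_rodata_ro() has flagged the text read-only via
 * kernel_set_to_readonly:
 */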
void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, start+size);

        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, start+size);

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
}

static void mark_nxdata_nx(void)
{
        /*
         * When this is called, init has already been executed and released,
         * so everything past _etext should be NX.
         */
        unsigned long start = PFN_ALIGN(_etext);
        /*
         * This comes from the is_kernel_text() upper limit. Also HPAGE where
         * used:
         */
        unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start;

        if (__supported_pte_mask & _PAGE_NX)
                printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10);
        set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long size = PFN_ALIGN(_etext) - start;

        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel text: %luk\n",
                size >> 10);

        kernel_set_to_readonly = 1;

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n",
                start, start+size);
        set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT);
#endif

        start += size;
        size = (unsigned long)__end_rodata - start;
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
                size >> 10);
        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size);
        set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: write protecting again\n");
        set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
#endif
        mark_nxdata_nx();
        if (__supported_pte_mask & _PAGE_NX)
                debug_checkwx();
}
#endif