init_64.c

/*
 * linux/arch/x86_64/mm/init.c
 *
 * Copyright (C) 1995 Linus Torvalds
 * Copyright (C) 2000 Pavel Machek <pavel@ucw.cz>
 * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>
#include <linux/kcore.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/setup.h>

#include "mm_internal.h"
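
/*
 * The helpers below build an identity (virt == phys) mapping for a physical
 * address range, allocating intermediate page tables through the
 * caller-supplied x86_mapping_info::alloc_pgt_page() callback.
 */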
static void ident_pmd_init(unsigned long pmd_flag, pmd_t *pmd_page,
			   unsigned long addr, unsigned long end)
{
	addr &= PMD_MASK;
	for (; addr < end; addr += PMD_SIZE) {
		pmd_t *pmd = pmd_page + pmd_index(addr);

		if (!pmd_present(*pmd))
			set_pmd(pmd, __pmd(addr | pmd_flag));
	}
}

static int ident_pud_init(struct x86_mapping_info *info, pud_t *pud_page,
			  unsigned long addr, unsigned long end)
{
	unsigned long next;

	for (; addr < end; addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (next > end)
			next = end;

		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, 0);
			ident_pmd_init(info->pmd_flag, pmd, addr, next);
			continue;
		}
		pmd = (pmd_t *)info->alloc_pgt_page(info->context);
		if (!pmd)
			return -ENOMEM;
		ident_pmd_init(info->pmd_flag, pmd, addr, next);
		set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE));
	}

	return 0;
}

int kernel_ident_mapping_init(struct x86_mapping_info *info, pgd_t *pgd_page,
			      unsigned long addr, unsigned long end)
{
	unsigned long next;
	int result;
	int off = info->kernel_mapping ? pgd_index(__PAGE_OFFSET) : 0;

	for (; addr < end; addr = next) {
		pgd_t *pgd = pgd_page + pgd_index(addr) + off;
		pud_t *pud;

		next = (addr & PGDIR_MASK) + PGDIR_SIZE;
		if (next > end)
			next = end;

		if (pgd_present(*pgd)) {
			pud = pud_offset(pgd, 0);
			result = ident_pud_init(info, pud, addr, next);
			if (result)
				return result;
			continue;
		}

		pud = (pud_t *)info->alloc_pgt_page(info->context);
		if (!pud)
			return -ENOMEM;
		result = ident_pud_init(info, pud, addr, next);
		if (result)
			return result;
		set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE));
	}

	return 0;
}

/*
 * NOTE: pagetable_init allocates all the fixmap pagetables contiguous on the
 * physical space so we can cache the place of the first one and move around
 * without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~0;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control non executable heap for 32bit processes.
 * To control the stack too use noexec=off
 *
 * on	PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off	PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
	if (!strcmp(str, "on"))
		force_personality32 &= ~READ_IMPLIES_EXEC;
	else if (!strcmp(str, "off"))
		force_personality32 |= READ_IMPLIES_EXEC;
	return 1;
}
__setup("noexec32=", nonx32_setup);

/*
 * When memory was added/removed make sure all the processes MM have
 * suitable PGD entries in the local PGD level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end, int removed)
{
	unsigned long address;

	for (address = start; address <= end; address += PGDIR_SIZE) {
		const pgd_t *pgd_ref = pgd_offset_k(address);
		struct page *page;

		/*
		 * When it is called after memory hot remove, pgd_none()
		 * returns true. In this case (removed == 1), we must clear
		 * the PGD entries in the local PGD level page.
		 */
		if (pgd_none(*pgd_ref) && !removed)
			continue;

		spin_lock(&pgd_lock);
		list_for_each_entry(page, &pgd_list, lru) {
			pgd_t *pgd;
			spinlock_t *pgt_lock;

			pgd = (pgd_t *)page_address(page) + pgd_index(address);
			/* the pgt_lock only for Xen */
			pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
			spin_lock(pgt_lock);

			if (!pgd_none(*pgd_ref) && !pgd_none(*pgd))
				BUG_ON(pgd_page_vaddr(*pgd)
				       != pgd_page_vaddr(*pgd_ref));

			if (removed) {
				if (pgd_none(*pgd_ref) && !pgd_none(*pgd))
					pgd_clear(pgd);
			} else {
				if (pgd_none(*pgd))
					set_pgd(pgd, *pgd_ref);
			}

			spin_unlock(pgt_lock);
		}
		spin_unlock(&pgd_lock);
	}
}

/*
 * NOTE: This function is marked __ref because it calls __init function
 * (alloc_bootmem_pages). It's safe to do it ONLY when after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
	void *ptr;

	if (after_bootmem)
		ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
	else
		ptr = alloc_bootmem_pages(PAGE_SIZE);

	if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
		panic("set_pte_phys: cannot allocate page data %s\n",
			after_bootmem ? "after bootmem" : "");
	}

	pr_debug("spp_getpage %p\n", ptr);

	return ptr;
}

static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
	if (pgd_none(*pgd)) {
		pud_t *pud = (pud_t *)spp_getpage();
		pgd_populate(&init_mm, pgd, pud);
		if (pud != pud_offset(pgd, 0))
			printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
			       pud, pud_offset(pgd, 0));
	}
	return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
	if (pud_none(*pud)) {
		pmd_t *pmd = (pmd_t *) spp_getpage();
		pud_populate(&init_mm, pud, pmd);
		if (pmd != pmd_offset(pud, 0))
			printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
			       pmd, pmd_offset(pud, 0));
	}
	return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = (pte_t *) spp_getpage();
		pmd_populate_kernel(&init_mm, pmd, pte);
		if (pte != pte_offset_kernel(pmd, 0))
			printk(KERN_ERR "PAGETABLE BUG #02!\n");
	}
	return pte_offset_kernel(pmd, vaddr);
}
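
/*
 * Install a kernel PTE for @vaddr, allocating any missing intermediate
 * page-table levels via spp_getpage() and flushing the single TLB entry.
 */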
void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = pud_page + pud_index(vaddr);
	pmd = fill_pmd(pud, vaddr);
	pte = fill_pte(pmd, vaddr);

	set_pte(pte, new_pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
	pgd_t *pgd;
	pud_t *pud_page;

	pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

	pgd = pgd_offset_k(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR
			"PGD FIXMAP MISSING, it should be setup in head.S!\n");
		return;
	}
	pud_page = (pud_t*)pgd_page_vaddr(*pgd);
	set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
	pgd_t *pgd;
	pud_t *pud;

	pgd = pgd_offset_k(vaddr);
	pud = fill_pud(pgd, vaddr);
	return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
	pmd_t *pmd;

	pmd = populate_extra_pmd(vaddr);
	return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
					enum page_cache_mode cache)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pgprot_t prot;

	pgprot_val(prot) = pgprot_val(PAGE_KERNEL_LARGE) |
		pgprot_val(pgprot_4k_2_large(cachemode2pgprot(cache)));
	BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
	for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
		pgd = pgd_offset_k((unsigned long)__va(phys));
		if (pgd_none(*pgd)) {
			pud = (pud_t *) spp_getpage();
			set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pud = pud_offset(pgd, (unsigned long)__va(phys));
		if (pud_none(*pud)) {
			pmd = (pmd_t *) spp_getpage();
			set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
						_PAGE_USER));
		}
		pmd = pmd_offset(pud, phys);
		BUG_ON(!pmd_none(*pmd));
		set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
	}
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_WB);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
	__init_extra_mapping(phys, size, _PAGE_CACHE_MODE_UC);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_base holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end. _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
	unsigned long vaddr = __START_KERNEL_map;
	unsigned long vaddr_end = __START_KERNEL_map + KERNEL_IMAGE_SIZE;
	unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt;

	/*
	 * Native path, max_pfn_mapped is not set yet.
	 * Xen has valid max_pfn_mapped set in
	 *	arch/x86/xen/mmu.c:xen_setup_kernel_pagetable().
	 */
	if (max_pfn_mapped)
		vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);

	for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > end)
			set_pmd(pmd, __pmd(0));
		else if (kaiser_enabled) {
			/*
			 * level2_kernel_pgt is initialized with _PAGE_GLOBAL:
			 * clear that now. This is not important, so long as
			 * CR4.PGE remains clear, but it removes an anomaly.
			 * Physical mapping setup below avoids _PAGE_GLOBAL
			 * by use of massage_pgprot() inside pfn_pte() etc.
			 */
			set_pmd(pmd, pmd_clear_flags(*pmd, _PAGE_GLOBAL));
		}
	}
}
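
/*
 * phys_pte_init/phys_pmd_init/phys_pud_init fill in the direct mapping for a
 * physical range at 4K/2M/1G granularity respectively, reusing entries that
 * are already present. Each returns the last physical address it mapped.
 */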
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
	      pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i;

	pte_t *pte = pte_page + pte_index(addr);

	for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
		next = (addr & PAGE_MASK) + PAGE_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
				set_pte(pte, __pte(0));
			continue;
		}

		/*
		 * We will re-use the existing mapping.
		 * Xen for example has some special requirements, like mapping
		 * pagetable pages as RO. So assume someone who pre-setup
		 * these mappings is more intelligent.
		 */
		if (pte_val(*pte)) {
			if (!after_bootmem)
				pages++;
			continue;
		}

		if (0)
			printk(" pte=%p addr=%lx pte=%016lx\n",
			       pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
		pages++;
		set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
		last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
	}

	update_page_count(PG_LEVEL_4K, pages);

	return last_map_addr;
}

static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
	      unsigned long page_size_mask, pgprot_t prot)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;

	int i = pmd_index(address);

	for (; i < PTRS_PER_PMD; i++, address = next) {
		pmd_t *pmd = pmd_page + pmd_index(address);
		pte_t *pte;
		pgprot_t new_prot = prot;

		next = (address & PMD_MASK) + PMD_SIZE;
		if (address >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
				set_pmd(pmd, __pmd(0));
			continue;
		}

		if (pmd_val(*pmd)) {
			if (!pmd_large(*pmd)) {
				spin_lock(&init_mm.page_table_lock);
				pte = (pte_t *)pmd_page_vaddr(*pmd);
				last_map_addr = phys_pte_init(pte, address,
								end, prot);
				spin_unlock(&init_mm.page_table_lock);
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_2M mapping, then we will
			 * use the existing mapping,
			 *
			 * Otherwise, we will split the large page mapping but
			 * use the same existing protection bits except for
			 * large page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_2M)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
		}

		if (page_size_mask & (1<<PG_LEVEL_2M)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pmd,
				pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
					__pgprot(pgprot_val(prot) | _PAGE_PSE)));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pte = alloc_low_page();
		last_map_addr = phys_pte_init(pte, address, end, new_prot);

		spin_lock(&init_mm.page_table_lock);
		pmd_populate_kernel(&init_mm, pmd, pte);
		spin_unlock(&init_mm.page_table_lock);
	}
	update_page_count(PG_LEVEL_2M, pages);
	return last_map_addr;
}

static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
	      unsigned long page_size_mask)
{
	unsigned long pages = 0, next;
	unsigned long last_map_addr = end;
	int i = pud_index(addr);

	for (; i < PTRS_PER_PUD; i++, addr = next) {
		pud_t *pud = pud_page + pud_index(addr);
		pmd_t *pmd;
		pgprot_t prot = PAGE_KERNEL;

		next = (addr & PUD_MASK) + PUD_SIZE;
		if (addr >= end) {
			if (!after_bootmem &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
			    !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
				set_pud(pud, __pud(0));
			continue;
		}

		if (pud_val(*pud)) {
			if (!pud_large(*pud)) {
				pmd = pmd_offset(pud, 0);
				last_map_addr = phys_pmd_init(pmd, addr, end,
							page_size_mask, prot);
				__flush_tlb_all();
				continue;
			}
			/*
			 * If we are ok with PG_LEVEL_1G mapping, then we will
			 * use the existing mapping.
			 *
			 * Otherwise, we will split the gbpage mapping but use
			 * the same existing protection bits except for large
			 * page, so that we don't violate Intel's TLB
			 * Application note (317080) which says, while changing
			 * the page sizes, new and old translations should
			 * not differ with respect to page frame and
			 * attributes.
			 */
			if (page_size_mask & (1 << PG_LEVEL_1G)) {
				if (!after_bootmem)
					pages++;
				last_map_addr = next;
				continue;
			}
			prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
		}

		if (page_size_mask & (1<<PG_LEVEL_1G)) {
			pages++;
			spin_lock(&init_mm.page_table_lock);
			set_pte((pte_t *)pud,
				pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
					PAGE_KERNEL_LARGE));
			spin_unlock(&init_mm.page_table_lock);
			last_map_addr = next;
			continue;
		}

		pmd = alloc_low_page();
		last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
					      prot);

		spin_lock(&init_mm.page_table_lock);
		pud_populate(&init_mm, pud, pmd);
		spin_unlock(&init_mm.page_table_lock);
	}
	__flush_tlb_all();

	update_page_count(PG_LEVEL_1G, pages);

	return last_map_addr;
}
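
/*
 * Map the physical range [start, end) into the kernel direct mapping, using
 * the largest page sizes allowed by @page_size_mask. Returns the last
 * physical address that was mapped.
 */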
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
			     unsigned long end,
			     unsigned long page_size_mask)
{
	bool pgd_changed = false;
	unsigned long next, last_map_addr = end;
	unsigned long addr;

	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);
	addr = start;

	for (; start < end; start = next) {
		pgd_t *pgd = pgd_offset_k(start);
		pud_t *pud;

		next = (start & PGDIR_MASK) + PGDIR_SIZE;

		if (pgd_val(*pgd)) {
			pud = (pud_t *)pgd_page_vaddr(*pgd);
			last_map_addr = phys_pud_init(pud, __pa(start),
						 __pa(end), page_size_mask);
			continue;
		}

		pud = alloc_low_page();
		last_map_addr = phys_pud_init(pud, __pa(start), __pa(end),
						 page_size_mask);

		spin_lock(&init_mm.page_table_lock);
		pgd_populate(&init_mm, pgd, pud);
		spin_unlock(&init_mm.page_table_lock);
		pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(addr, end - 1, 0);

	__flush_tlb_all();

	return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
}
#endif

void __init paging_init(void)
{
	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();

	/*
	 * clear the default setting with node 0
	 * note: don't use nodes_clear here, that is really clearing when
	 *	 numa support is not compiled in, and later node_set_state
	 *	 will not set it back.
	 */
	node_clear_state(0, N_MEMORY);
	if (N_MEMORY != N_NORMAL_MEMORY)
		node_clear_state(0, N_NORMAL_MEMORY);

	zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug the variables max_pfn, max_low_pfn and high_memory need
 * updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
	unsigned long end_pfn = PFN_UP(start + size);

	if (end_pfn > max_pfn) {
		max_pfn = end_pfn;
		max_low_pfn = end_pfn;
		high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
	}
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size, bool for_device)
{
	struct pglist_data *pgdat = NODE_DATA(nid);
	struct zone *zone = pgdat->node_zones +
		zone_for_memory(nid, start, size, ZONE_NORMAL, for_device);
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	init_memory_mapping(start, start + size);

	ret = __add_pages(nid, zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	/* update max_pfn, max_low_pfn and high_memory */
	update_end_of_memory_vars(start, size);

	return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);
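
/*
 * Poison value written into partially freed vmemmap pages: a page is only
 * returned to the allocator once every byte in it has been set to PAGE_INUSE.
 */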
#define PAGE_INUSE 0xFD

static void __meminit free_pagetable(struct page *page, int order)
{
	unsigned long magic;
	unsigned int nr_pages = 1 << order;

	/* bootmem page has reserved flag */
	if (PageReserved(page)) {
		__ClearPageReserved(page);

		magic = (unsigned long)page->lru.next;
		if (magic == SECTION_INFO || magic == MIX_SECTION_INFO) {
			while (nr_pages--)
				put_page_bootmem(page++);
		} else
			while (nr_pages--)
				free_reserved_page(page++);
	} else
		free_pages((unsigned long)page_address(page), order);
}

static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
{
	pte_t *pte;
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte = pte_start + i;
		if (pte_val(*pte))
			return;
	}

	/* free a pte table */
	free_pagetable(pmd_page(*pmd), 0);
	spin_lock(&init_mm.page_table_lock);
	pmd_clear(pmd);
	spin_unlock(&init_mm.page_table_lock);
}

static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
{
	pmd_t *pmd;
	int i;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd = pmd_start + i;
		if (pmd_val(*pmd))
			return;
	}

	/* free a pmd table */
	free_pagetable(pud_page(*pud), 0);
	spin_lock(&init_mm.page_table_lock);
	pud_clear(pud);
	spin_unlock(&init_mm.page_table_lock);
}

/* Return true if pgd is changed, otherwise return false. */
static bool __meminit free_pud_table(pud_t *pud_start, pgd_t *pgd)
{
	pud_t *pud;
	int i;

	for (i = 0; i < PTRS_PER_PUD; i++) {
		pud = pud_start + i;
		if (pud_val(*pud))
			return false;
	}

	/* free a pud table */
	free_pagetable(pgd_page(*pgd), 0);
	spin_lock(&init_mm.page_table_lock);
	pgd_clear(pgd);
	spin_unlock(&init_mm.page_table_lock);

	return true;
}
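
/*
 * remove_pte_table/remove_pmd_table/remove_pud_table unmap [addr, end).
 * For the direct mapping (direct == true) only the page-table pages are
 * released; for vmemmap the backing pages themselves are freed as well.
 */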
static void __meminit
remove_pte_table(pte_t *pte_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte;
	void *page_addr;
	phys_addr_t phys_addr;

	pte = pte_start + pte_index(addr);
	for (; addr < end; addr = next, pte++) {
		next = (addr + PAGE_SIZE) & PAGE_MASK;
		if (next > end)
			next = end;

		if (!pte_present(*pte))
			continue;

		/*
		 * We mapped [0,1G) memory as identity mapping when
		 * initializing, in arch/x86/kernel/head_64.S. These
		 * pagetables cannot be removed.
		 */
		phys_addr = pte_val(*pte) + (addr & PAGE_MASK);
		if (phys_addr < (phys_addr_t)0x40000000)
			return;

		if (IS_ALIGNED(addr, PAGE_SIZE) &&
		    IS_ALIGNED(next, PAGE_SIZE)) {
			/*
			 * Do not free direct mapping pages since they were
			 * freed when offlining, or simply not in use.
			 */
			if (!direct)
				free_pagetable(pte_page(*pte), 0);

			spin_lock(&init_mm.page_table_lock);
			pte_clear(&init_mm, addr, pte);
			spin_unlock(&init_mm.page_table_lock);

			/* For non-direct mapping, pages means nothing. */
			pages++;
		} else {
			/*
			 * If we are here, we are freeing vmemmap pages since
			 * direct mapped memory ranges to be freed are aligned.
			 *
			 * If we are not removing the whole page, it means
			 * other page structs in this page are being used and
			 * we cannot remove them. So fill the unused page_structs
			 * with 0xFD, and remove the page when it is wholly
			 * filled with 0xFD.
			 */
			memset((void *)addr, PAGE_INUSE, next - addr);

			page_addr = page_address(pte_page(*pte));
			if (!memchr_inv(page_addr, PAGE_INUSE, PAGE_SIZE)) {
				free_pagetable(pte_page(*pte), 0);

				spin_lock(&init_mm.page_table_lock);
				pte_clear(&init_mm, addr, pte);
				spin_unlock(&init_mm.page_table_lock);
			}
		}
	}

	/* Call free_pte_table() in remove_pmd_table(). */
	flush_tlb_all();
	if (direct)
		update_page_count(PG_LEVEL_4K, -pages);
}

static void __meminit
remove_pmd_table(pmd_t *pmd_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pte_t *pte_base;
	pmd_t *pmd;
	void *page_addr;

	pmd = pmd_start + pmd_index(addr);
	for (; addr < end; addr = next, pmd++) {
		next = pmd_addr_end(addr, end);

		if (!pmd_present(*pmd))
			continue;

		if (pmd_large(*pmd)) {
			if (IS_ALIGNED(addr, PMD_SIZE) &&
			    IS_ALIGNED(next, PMD_SIZE)) {
				if (!direct)
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pmd_clear(pmd);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pmd_page(*pmd));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PMD_SIZE)) {
					free_pagetable(pmd_page(*pmd),
						       get_order(PMD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pmd_clear(pmd);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pte_base = (pte_t *)pmd_page_vaddr(*pmd);
		remove_pte_table(pte_base, addr, next, direct);
		free_pte_table(pte_base, pmd);
	}

	/* Call free_pmd_table() in remove_pud_table(). */
	if (direct)
		update_page_count(PG_LEVEL_2M, -pages);
}

static void __meminit
remove_pud_table(pud_t *pud_start, unsigned long addr, unsigned long end,
		 bool direct)
{
	unsigned long next, pages = 0;
	pmd_t *pmd_base;
	pud_t *pud;
	void *page_addr;

	pud = pud_start + pud_index(addr);
	for (; addr < end; addr = next, pud++) {
		next = pud_addr_end(addr, end);

		if (!pud_present(*pud))
			continue;

		if (pud_large(*pud)) {
			if (IS_ALIGNED(addr, PUD_SIZE) &&
			    IS_ALIGNED(next, PUD_SIZE)) {
				if (!direct)
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

				spin_lock(&init_mm.page_table_lock);
				pud_clear(pud);
				spin_unlock(&init_mm.page_table_lock);
				pages++;
			} else {
				/* If here, we are freeing vmemmap pages. */
				memset((void *)addr, PAGE_INUSE, next - addr);

				page_addr = page_address(pud_page(*pud));
				if (!memchr_inv(page_addr, PAGE_INUSE,
						PUD_SIZE)) {
					free_pagetable(pud_page(*pud),
						       get_order(PUD_SIZE));

					spin_lock(&init_mm.page_table_lock);
					pud_clear(pud);
					spin_unlock(&init_mm.page_table_lock);
				}
			}

			continue;
		}

		pmd_base = (pmd_t *)pud_page_vaddr(*pud);
		remove_pmd_table(pmd_base, addr, next, direct);
		free_pmd_table(pmd_base, pud);
	}

	if (direct)
		update_page_count(PG_LEVEL_1G, -pages);
}

/* start and end are both virtual addresses. */
static void __meminit
remove_pagetable(unsigned long start, unsigned long end, bool direct)
{
	unsigned long next;
	unsigned long addr;
	pgd_t *pgd;
	pud_t *pud;
	bool pgd_changed = false;

	for (addr = start; addr < end; addr = next) {
		next = pgd_addr_end(addr, end);

		pgd = pgd_offset_k(addr);
		if (!pgd_present(*pgd))
			continue;

		pud = (pud_t *)pgd_page_vaddr(*pgd);
		remove_pud_table(pud, addr, next, direct);
		if (free_pud_table(pud, pgd))
			pgd_changed = true;
	}

	if (pgd_changed)
		sync_global_pgds(start, end - 1, 1);

	flush_tlb_all();
}

void __ref vmemmap_free(unsigned long start, unsigned long end)
{
	remove_pagetable(start, end, false);
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void __meminit
kernel_physical_mapping_remove(unsigned long start, unsigned long end)
{
	start = (unsigned long)__va(start);
	end = (unsigned long)__va(end);

	remove_pagetable(start, end, true);
}

int __ref arch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	kernel_physical_mapping_remove(start, start + size);
	ret = __remove_pages(zone, start_pfn, nr_pages);
	WARN_ON_ONCE(ret);

	return ret;
}
#endif
#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

static void __init register_page_bootmem_info(void)
{
#ifdef CONFIG_NUMA
	int i;

	for_each_online_node(i)
		register_page_bootmem_info_node(NODE_DATA(i));
#endif
}

void __init mem_init(void)
{
	pci_iommu_alloc();

	/* clear_bss() already cleared the empty_zero_page */

	register_page_bootmem_info();

	/* this will put all memory onto the freelists */
	free_all_bootmem();
	after_bootmem = 1;

	/* Register memory areas for /proc/kcore */
	kclist_add(&kcore_vsyscall, (void *)VSYSCALL_ADDR,
			 PAGE_SIZE, KCORE_OTHER);

	mem_init_print_info(NULL);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read write\n",
		 start, end);

	/*
	 * Make the kernel identity mapping for text RW. Kernel text
	 * mapping will always be RO. Refer to the comment in
	 * static_protections() in pageattr.c
	 */
	set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long end = PFN_ALIGN(__stop___ex_table);

	if (!kernel_set_to_readonly)
		return;

	pr_debug("Set kernel text: %lx - %lx for read only\n",
		 start, end);

	/*
	 * Set the kernel identity mapping for text RO.
	 */
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
	unsigned long start = PFN_ALIGN(_text);
	unsigned long rodata_start = PFN_ALIGN(__start_rodata);
	unsigned long end = (unsigned long) &__end_rodata_hpage_align;
	unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
	unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
	unsigned long all_end;

	printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
	       (end - start) >> 10);
	set_memory_ro(start, (end - start) >> PAGE_SHIFT);

	kernel_set_to_readonly = 1;

	/*
	 * The rodata/data/bss/brk section (but not the kernel text!)
	 * should also be not-executable.
	 *
	 * We align all_end to PMD_SIZE because the existing mapping
	 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
	 * split the PMD and the remainder between _brk_end and the end
	 * of the PMD will remain mapped executable.
	 *
	 * Any PMD which was setup after the one which covers _brk_end
	 * has been zapped already via cleanup_highmap().
	 */
	all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
	set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);

	rodata_test();

#ifdef CONFIG_CPA_DEBUG
	printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
	set_memory_rw(start, (end-start) >> PAGE_SHIFT);

	printk(KERN_INFO "Testing CPA: again\n");
	set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(text_end)),
			(unsigned long) __va(__pa_symbol(rodata_start)));
	free_init_pages("unused kernel",
			(unsigned long) __va(__pa_symbol(rodata_end)),
			(unsigned long) __va(__pa_symbol(_sdata)));

	debug_checkwx();
}

#endif

int kern_addr_valid(unsigned long addr)
{
	unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (above != 0 && above != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_large(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_large(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
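
/*
 * Choose the memory block size reported via memory_block_size_bytes():
 * 2GB when at least 64GB of RAM is available, MIN_MEMORY_BLOCK_SIZE when
 * less than 64GB is installed, otherwise the largest power of two (up to
 * 2GB) that evenly divides the end of memory.
 */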
static unsigned long probe_memory_block_size(void)
{
	/* start from 2g */
	unsigned long bz = 1UL<<31;

	if (totalram_pages >= (64ULL << (30 - PAGE_SHIFT))) {
		pr_info("Using 2GB memory block size for large-memory system\n");
		return 2UL * 1024 * 1024 * 1024;
	}

	/* less than 64g installed */
	if ((max_pfn << PAGE_SHIFT) < (16UL << 32))
		return MIN_MEMORY_BLOCK_SIZE;

	/* get the tail size */
	while (bz > MIN_MEMORY_BLOCK_SIZE) {
		if (!((max_pfn << PAGE_SHIFT) & (bz - 1)))
			break;
		bz >>= 1;
	}

	printk(KERN_DEBUG "memory block size : %ldMB\n", bz >> 20);

	return bz;
}

static unsigned long memory_block_size_probed;
unsigned long memory_block_size_bytes(void)
{
	if (!memory_block_size_probed)
		memory_block_size_probed = probe_memory_block_size();

	return memory_block_size_probed;
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;

static int __meminit vmemmap_populate_hugepages(unsigned long start,
						unsigned long end, int node)
{
	unsigned long addr;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	for (addr = start; addr < end; addr = next) {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (p) {
				pte_t entry;

				entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
						PAGE_KERNEL_LARGE);
				set_pmd(pmd, __pmd(pte_val(entry)));

				/* check to see if we have contiguous blocks */
				if (p_end != p || node_start != node) {
					if (p_start)
						pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
						       addr_start, addr_end-1, p_start, p_end-1, node_start);
					addr_start = addr;
					node_start = node;
					p_start = p;
				}

				addr_end = addr + PMD_SIZE;
				p_end = p + PMD_SIZE;
				continue;
			}
		} else if (pmd_large(*pmd)) {
			vmemmap_verify((pte_t *)pmd, node, addr, next);
			continue;
		}
		pr_warn_once("vmemmap: falling back to regular page backing\n");
		if (vmemmap_populate_basepages(addr, next, node))
			return -ENOMEM;
	}
	return 0;
}

int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	int err;

	if (cpu_has_pse)
		err = vmemmap_populate_hugepages(start, end, node);
	else
		err = vmemmap_populate_basepages(start, end, node);
	if (!err)
		sync_global_pgds(start, end - 1, 0);
	return err;
}

#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_HAVE_BOOTMEM_INFO_NODE)
void register_page_bootmem_memmap(unsigned long section_nr,
				  struct page *start_page, unsigned long size)
{
	unsigned long addr = (unsigned long)start_page;
	unsigned long end = (unsigned long)(start_page + size);
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned int nr_pages;
	struct page *page;

	for (; addr < end; addr = next) {
		pte_t *pte = NULL;

		pgd = pgd_offset_k(addr);
		if (pgd_none(*pgd)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pgd_page(*pgd), MIX_SECTION_INFO);

		pud = pud_offset(pgd, addr);
		if (pud_none(*pud)) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			continue;
		}
		get_page_bootmem(section_nr, pud_page(*pud), MIX_SECTION_INFO);

		if (!cpu_has_pse) {
			next = (addr + PAGE_SIZE) & PAGE_MASK;
			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;
			get_page_bootmem(section_nr, pmd_page(*pmd),
					 MIX_SECTION_INFO);

			pte = pte_offset_kernel(pmd, addr);
			if (pte_none(*pte))
				continue;
			get_page_bootmem(section_nr, pte_page(*pte),
					 SECTION_INFO);
		} else {
			next = pmd_addr_end(addr, end);

			pmd = pmd_offset(pud, addr);
			if (pmd_none(*pmd))
				continue;

			nr_pages = 1 << (get_order(PMD_SIZE));
			page = pmd_page(*pmd);
			while (nr_pages--)
				get_page_bootmem(section_nr, page++,
						 SECTION_INFO);
		}
	}
}
#endif

void __meminit vmemmap_populate_print_last(void)
{
	if (p_start) {
		pr_debug(" [%lx-%lx] PMD -> [%p-%p] on node %d\n",
			 addr_start, addr_end-1, p_start, p_end-1, node_start);
		p_start = NULL;
		p_end = NULL;
		node_start = 0;
	}
}
#endif