/*
 * Based on arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/libfdt.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/stop_machine.h>

#include <asm/cputype.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/memblock.h>
#include <asm/mmu_context.h>

#include "mm.h"

u64 idmap_t0sz = TCR_T0SZ(VA_BITS);
/*
 * Empty_zero_page is a special page that is used for zero-initialized data
 * and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
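
/*
 * Allocate zeroed page table memory from memblock; only usable during early
 * boot, before the core memory allocators are up.
 */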
static void __init *early_alloc(unsigned long sz)
{
	phys_addr_t phys;
	void *ptr;

	phys = memblock_alloc(sz, sz);
	BUG_ON(!phys);
	ptr = __va(phys);
	memset(ptr, 0, sz);
	return ptr;
}

/*
 * remap a PMD into pages
 */
static void split_pmd(pmd_t *pmd, pte_t *pte)
{
	unsigned long pfn = pmd_pfn(*pmd);
	int i = 0;

	do {
		/*
		 * Need to have the least restrictive permissions available
		 * permissions will be fixed up later
		 */
		set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
		pfn++;
	} while (pte++, i++, i < PTRS_PER_PTE);
}
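
/*
 * Create PTE-level mappings for [addr, end). If the PMD currently holds a
 * section mapping, it is first split into a table of pages via split_pmd().
 */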
static void alloc_init_pte(pmd_t *pmd, unsigned long addr,
			   unsigned long end, unsigned long pfn,
			   pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pte_t *pte;

	if (pmd_none(*pmd) || pmd_sect(*pmd)) {
		pte = alloc(PTRS_PER_PTE * sizeof(pte_t));
		if (pmd_sect(*pmd))
			split_pmd(pmd, pte);
		__pmd_populate(pmd, __pa(pte), PMD_TYPE_TABLE);
		flush_tlb_all();
	}
	BUG_ON(pmd_bad(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	do {
		set_pte(pte, pfn_pte(pfn, prot));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}
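
/*
 * remap a PUD into a table of PMD sections, preserving the original output
 * address and attributes
 */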
static void split_pud(pud_t *old_pud, pmd_t *pmd)
{
	unsigned long addr = pud_pfn(*old_pud) << PAGE_SHIFT;
	pgprot_t prot = __pgprot(pud_val(*old_pud) ^ addr);
	int i = 0;

	do {
		set_pmd(pmd, __pmd(addr | pgprot_val(prot)));
		addr += PMD_SIZE;
	} while (pmd++, i++, i < PTRS_PER_PMD);
}
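
/*
 * Create PMD-level mappings for [addr, end), using section (block) entries
 * where the virtual and physical alignment allows, and falling back to
 * alloc_init_pte() otherwise.
 */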
static void alloc_init_pmd(struct mm_struct *mm, pud_t *pud,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pmd_t *pmd;
	unsigned long next;

	/*
	 * Check for initial section mappings in the pgd/pud and remove them.
	 */
	if (pud_none(*pud) || pud_sect(*pud)) {
		pmd = alloc(PTRS_PER_PMD * sizeof(pmd_t));
		if (pud_sect(*pud)) {
			/*
			 * need to have the 1G of mappings continue to be
			 * present
			 */
			split_pud(pud, pmd);
		}
		pud_populate(mm, pud, pmd);
		flush_tlb_all();
	}
	BUG_ON(pud_bad(*pud));

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		/* try section mapping first */
		if (((addr | next | phys) & ~SECTION_MASK) == 0) {
			pmd_t old_pmd = *pmd;
			set_pmd(pmd, __pmd(phys |
					   pgprot_val(mk_sect_prot(prot))));
			/*
			 * Check for previous table entries created during
			 * boot (__create_page_tables) and flush them.
			 */
			if (!pmd_none(old_pmd)) {
				flush_tlb_all();
				if (pmd_table(old_pmd)) {
					phys_addr_t table = __pa(pte_offset_map(&old_pmd, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pte(pmd, addr, next, __phys_to_pfn(phys),
				       prot, alloc);
		}
		phys += next - addr;
	} while (pmd++, addr = next, addr != end);
}
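
/*
 * A 1GB block mapping may only be used with the 4K granule (PAGE_SHIFT == 12)
 * and when the virtual range and the physical address are all 1GB aligned.
 */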
static inline bool use_1G_block(unsigned long addr, unsigned long next,
				unsigned long phys)
{
	if (PAGE_SHIFT != 12)
		return false;

	if (((addr | next | phys) & ~PUD_MASK) != 0)
		return false;

	return true;
}
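
/*
 * Create PUD-level mappings for [addr, end), using 1GB block entries when
 * use_1G_block() allows it and descending to alloc_init_pmd() otherwise.
 */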
static void alloc_init_pud(struct mm_struct *mm, pgd_t *pgd,
			   unsigned long addr, unsigned long end,
			   phys_addr_t phys, pgprot_t prot,
			   void *(*alloc)(unsigned long size))
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pud = alloc(PTRS_PER_PUD * sizeof(pud_t));
		pgd_populate(mm, pgd, pud);
	}
	BUG_ON(pgd_bad(*pgd));

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);

		/*
		 * For 4K granule only, attempt to put down a 1GB block
		 */
		if (use_1G_block(addr, next, phys)) {
			pud_t old_pud = *pud;
			set_pud(pud, __pud(phys |
					   pgprot_val(mk_sect_prot(prot))));

			/*
			 * If we have an old value for a pud, it will
			 * be pointing to a pmd table that we no longer
			 * need (from swapper_pg_dir).
			 *
			 * Look up the old pmd table and free it.
			 */
			if (!pud_none(old_pud)) {
				flush_tlb_all();
				if (pud_table(old_pud)) {
					phys_addr_t table = __pa(pmd_offset(&old_pud, 0));
					if (!WARN_ON_ONCE(slab_is_available()))
						memblock_free(table, PAGE_SIZE);
				}
			}
		} else {
			alloc_init_pmd(mm, pud, addr, next, phys, prot, alloc);
		}
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}
/*
 * Create the page directory entries and any necessary page tables for the
 * given virtual/physical address range.
 */
static void __create_mapping(struct mm_struct *mm, pgd_t *pgd,
			     phys_addr_t phys, unsigned long virt,
			     phys_addr_t size, pgprot_t prot,
			     void *(*alloc)(unsigned long size))
{
	unsigned long addr, length, end, next;

	addr = virt & PAGE_MASK;
	length = PAGE_ALIGN(size + (virt & ~PAGE_MASK));

	end = addr + length;
	do {
		next = pgd_addr_end(addr, end);
		alloc_init_pud(mm, pgd, addr, next, phys, prot, alloc);
		phys += next - addr;
	} while (pgd++, addr = next, addr != end);
}
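
/*
 * Page table allocator for mappings created after boot, once the page
 * allocator is available (see create_pgd_mapping() and create_mapping_late()).
 */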
static void *late_alloc(unsigned long size)
{
	void *ptr;

	BUG_ON(size > PAGE_SIZE);
	ptr = (void *)__get_free_page(PGALLOC_GFP);
	BUG_ON(!ptr);
	return ptr;
}
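
/*
 * Create a kernel mapping in init_mm. Only usable during early boot, since
 * any new page tables are allocated from memblock via early_alloc().
 */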
static void __init create_mapping(phys_addr_t phys, unsigned long virt,
				  phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}
	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK), phys, virt,
			 size, prot, early_alloc);
}
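
/*
 * Create a mapping in the page tables of an arbitrary mm, allocating any
 * required tables with late_alloc().
 */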
void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys,
			       unsigned long virt, phys_addr_t size,
			       pgprot_t prot)
{
	__create_mapping(mm, pgd_offset(mm, virt), phys, virt, size, prot,
			 late_alloc);
}
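
/*
 * Like create_mapping(), but safe to call after boot because page tables
 * come from late_alloc() rather than memblock.
 */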
static void create_mapping_late(phys_addr_t phys, unsigned long virt,
				phys_addr_t size, pgprot_t prot)
{
	if (virt < VMALLOC_START) {
		pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n",
			&phys, virt);
		return;
	}

	__create_mapping(&init_mm, pgd_offset_k(virt & PAGE_MASK),
			 phys, virt, size, prot, late_alloc);
}
#ifdef CONFIG_DEBUG_RODATA
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	/*
	 * Set up the executable regions using the existing section mappings
	 * for now. This will get more fine grained later once all memory
	 * is mapped
	 */
	unsigned long kernel_x_start = round_down(__pa(_stext),
						  SWAPPER_BLOCK_SIZE);
	unsigned long kernel_x_end = round_up(__pa(__init_end),
					      SWAPPER_BLOCK_SIZE);

	if (end < kernel_x_start) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else if (start >= kernel_x_end) {
		create_mapping(start, __phys_to_virt(start),
			       end - start, PAGE_KERNEL);
	} else {
		if (start < kernel_x_start)
			create_mapping(start, __phys_to_virt(start),
				       kernel_x_start - start,
				       PAGE_KERNEL);
		create_mapping(kernel_x_start,
			       __phys_to_virt(kernel_x_start),
			       kernel_x_end - kernel_x_start,
			       PAGE_KERNEL_EXEC);
		if (kernel_x_end < end)
			create_mapping(kernel_x_end,
				       __phys_to_virt(kernel_x_end),
				       end - kernel_x_end,
				       PAGE_KERNEL);
	}
}
#else
static void __init __map_memblock(phys_addr_t start, phys_addr_t end)
{
	create_mapping(start, __phys_to_virt(start), end - start,
		       PAGE_KERNEL_EXEC);
}
#endif
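
/*
 * Map all memory regions reported by memblock into the linear kernel mapping.
 */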
static void __init map_mem(void)
{
	struct memblock_region *reg;
	phys_addr_t limit;

	/*
	 * Temporarily limit the memblock range. We need to do this as
	 * create_mapping requires puds, pmds and ptes to be allocated from
	 * memory addressable from the initial direct kernel mapping.
	 *
	 * The initial direct kernel mapping, located at swapper_pg_dir, gives
	 * us PUD_SIZE (with SECTION maps) or PMD_SIZE (without SECTION maps)
	 * memory starting from PHYS_OFFSET (which must be aligned to 2MB as
	 * per Documentation/arm64/booting.txt).
	 */
	limit = PHYS_OFFSET + SWAPPER_INIT_MAP_SIZE;
	memblock_set_current_limit(limit);

	/* map all the memory banks */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;

		if (start >= end)
			break;

		if (ARM64_SWAPPER_USES_SECTION_MAPS) {
			/*
			 * For the first memory bank, align the start address
			 * and the current memblock limit to prevent
			 * create_mapping() from allocating pte page tables
			 * from unmapped memory. With the section maps, if the
			 * first block doesn't end on a section size boundary,
			 * create_mapping() will try to allocate a pte page,
			 * which may be returned from an unmapped area.
			 * When section maps are not used, the pte page table
			 * for the current limit is already present in
			 * swapper_pg_dir.
			 */
			if (start < limit)
				start = ALIGN(start, SECTION_SIZE);
			if (end < limit) {
				limit = end & SECTION_MASK;
				memblock_set_current_limit(limit);
			}
		}
		__map_memblock(start, end);
	}

	/* Limit no longer required. */
	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
}
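
/*
 * With DEBUG_RODATA, __map_memblock() rounded the executable region out to
 * SWAPPER_BLOCK_SIZE boundaries; remap the leading and trailing parts that
 * are not actually kernel text back to non-executable PAGE_KERNEL.
 */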
static void __init fixup_executable(void)
{
#ifdef CONFIG_DEBUG_RODATA
	/* now that we are actually fully mapped, make the start/end more fine grained */
	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_start = round_down(__pa(_stext),
							 SWAPPER_BLOCK_SIZE);

		create_mapping(aligned_start, __phys_to_virt(aligned_start),
			       __pa(_stext) - aligned_start,
			       PAGE_KERNEL);
	}

	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
		unsigned long aligned_end = round_up(__pa(__init_end),
						     SWAPPER_BLOCK_SIZE);

		create_mapping(__pa(__init_end), (unsigned long)__init_end,
			       aligned_end - __pa(__init_end),
			       PAGE_KERNEL);
	}
#endif
}
#ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void)
{
	create_mapping_late(__pa(_stext), (unsigned long)_stext,
			    (unsigned long)_etext - (unsigned long)_stext,
			    PAGE_KERNEL_ROX);
}
#endif

void fixup_init(void)
{
	create_mapping_late(__pa(__init_begin), (unsigned long)__init_begin,
			    (unsigned long)__init_end - (unsigned long)__init_begin,
			    PAGE_KERNEL);
}
/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps and sets up the zero page.
 */
void __init paging_init(void)
{
	void *zero_page;

	map_mem();
	fixup_executable();

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);

	/* Ensure the zero page is visible to the page table walker */
	dsb(ishst);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_set_reserved_ttbr0();
	local_flush_tlb_all();
	cpu_set_default_tcr_t0sz();
}
/*
 * Check whether a kernel address is valid (derived from arch/x86/).
 */
int kern_addr_valid(unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if ((((long)addr) >> VA_BITS) != -1UL)
		return 0;

	pgd = pgd_offset_k(addr);
	if (pgd_none(*pgd))
		return 0;

	pud = pud_offset(pgd, addr);
	if (pud_none(*pud))
		return 0;

	if (pud_sect(*pud))
		return pfn_valid(pud_pfn(*pud));

	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return 0;

	if (pmd_sect(*pmd))
		return pfn_valid(pmd_pfn(*pmd));

	pte = pte_offset_kernel(pmd, addr);
	if (pte_none(*pte))
		return 0;

	return pfn_valid(pte_pfn(*pte));
}
#ifdef CONFIG_SPARSEMEM_VMEMMAP
#if !ARM64_SWAPPER_USES_SECTION_MAPS
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	return vmemmap_populate_basepages(start, end, node);
}
#else	/* !ARM64_SWAPPER_USES_SECTION_MAPS */
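/*
 * With section maps, back the vmemmap with PMD-sized block mappings allocated
 * from vmemmap_alloc_block_buf().
 */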
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node)
{
	unsigned long addr = start;
	unsigned long next;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	do {
		next = pmd_addr_end(addr, end);

		pgd = vmemmap_pgd_populate(addr, node);
		if (!pgd)
			return -ENOMEM;

		pud = vmemmap_pud_populate(pgd, addr, node);
		if (!pud)
			return -ENOMEM;

		pmd = pmd_offset(pud, addr);
		if (pmd_none(*pmd)) {
			void *p = NULL;

			p = vmemmap_alloc_block_buf(PMD_SIZE, node);
			if (!p)
				return -ENOMEM;

			set_pmd(pmd, __pmd(__pa(p) | PROT_SECT_NORMAL));
		} else
			vmemmap_verify((pte_t *)pmd, node, addr, next);
	} while (addr = next, addr != end);

	return 0;
}
#endif	/* !ARM64_SWAPPER_USES_SECTION_MAPS */

void vmemmap_free(unsigned long start, unsigned long end)
{
}
#endif	/* CONFIG_SPARSEMEM_VMEMMAP */
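
/*
 * Statically allocated page tables backing the fixmap region. They live in
 * .bss so they are available before any memory allocator is up; levels that
 * are folded away for the configured CONFIG_PGTABLE_LEVELS need no backing.
 */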
static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
#if CONFIG_PGTABLE_LEVELS > 2
static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
#endif
#if CONFIG_PGTABLE_LEVELS > 3
static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
#endif
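
/*
 * Walk init_mm's page tables down to the entry covering a fixmap address;
 * these helpers BUG() if an expected level is missing or malformed.
 */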
static inline pud_t *fixmap_pud(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);

	BUG_ON(pgd_none(*pgd) || pgd_bad(*pgd));

	return pud_offset(pgd, addr);
}

static inline pmd_t *fixmap_pmd(unsigned long addr)
{
	pud_t *pud = fixmap_pud(addr);

	BUG_ON(pud_none(*pud) || pud_bad(*pud));

	return pmd_offset(pud, addr);
}

static inline pte_t *fixmap_pte(unsigned long addr)
{
	pmd_t *pmd = fixmap_pmd(addr);

	BUG_ON(pmd_none(*pmd) || pmd_bad(*pmd));

	return pte_offset_kernel(pmd, addr);
}
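
/*
 * Wire the statically allocated bm_* tables into init_mm so that the fixmap
 * (and early ioremap) can be used before paging_init() runs.
 */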
void __init early_fixmap_init(void)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	unsigned long addr = FIXADDR_START;

	pgd = pgd_offset_k(addr);
	pgd_populate(&init_mm, pgd, bm_pud);
	pud = pud_offset(pgd, addr);
	pud_populate(&init_mm, pud, bm_pmd);
	pmd = pmd_offset(pud, addr);
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));

	if ((pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)))
	     || pmd != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		pr_warn("pmd %p != %p, %p\n",
			pmd, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)),
			fixmap_pmd(fix_to_virt(FIX_BTMAP_END)));
		pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		pr_warn("fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));
		pr_warn("FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		pr_warn("FIX_BTMAP_BEGIN:     %d\n", FIX_BTMAP_BEGIN);
	}
}
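
/*
 * Install or clear a single fixmap entry. A zero pgprot clears the slot and
 * flushes the TLB for that page.
 */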
void __set_fixmap(enum fixed_addresses idx,
		  phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);

	pte = fixmap_pte(addr);

	if (pgprot_val(flags)) {
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	} else {
		pte_clear(&init_mm, addr, pte);
		flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
	}
}
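
/*
 * Map the flattened device tree blob through the FIX_FDT fixmap slot so it
 * can be parsed before the linear mapping of memory has been set up.
 */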
void *__init fixmap_remap_fdt(phys_addr_t dt_phys)
{
	const u64 dt_virt_base = __fix_to_virt(FIX_FDT);
	pgprot_t prot = PAGE_KERNEL_RO;
	int size, offset;
	void *dt_virt;

	/*
	 * Check whether the physical FDT address is set and meets the minimum
	 * alignment requirement. Since we are relying on MIN_FDT_ALIGN to be
	 * at least 8 bytes so that we can always access the magic and size
	 * fields of the FDT header after mapping the first chunk, double check
	 * here if that is indeed the case.
	 */
	BUILD_BUG_ON(MIN_FDT_ALIGN < 8);
	if (!dt_phys || dt_phys % MIN_FDT_ALIGN)
		return NULL;

	/*
	 * Make sure that the FDT region can be mapped without the need to
	 * allocate additional translation table pages, so that it is safe
	 * to call create_mapping() this early.
	 *
	 * On 64k pages, the FDT will be mapped using PTEs, so we need to
	 * be in the same PMD as the rest of the fixmap.
	 * On 4k pages, we'll use section mappings for the FDT so we only
	 * have to be in the same PUD.
	 */
	BUILD_BUG_ON(dt_virt_base % SZ_2M);

	BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT !=
		     __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT);

	offset = dt_phys % SWAPPER_BLOCK_SIZE;
	dt_virt = (void *)dt_virt_base + offset;

	/* map the first chunk so we can read the size from the header */
	create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
		       SWAPPER_BLOCK_SIZE, prot);

	if (fdt_magic(dt_virt) != FDT_MAGIC)
		return NULL;

	size = fdt_totalsize(dt_virt);
	if (size > MAX_FDT_SIZE)
		return NULL;

	if (offset + size > SWAPPER_BLOCK_SIZE)
		create_mapping(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base,
			       round_up(offset + size, SWAPPER_BLOCK_SIZE), prot);

	memblock_reserve(dt_phys, size);

	return dt_virt;
}

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
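/*
 * No intermediate page tables are freed here: report success only when the
 * entry is already clear, so callers fall back to smaller mappings otherwise.
 */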
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return pud_none(*pud);
}

int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}
#endif