/* arch/arm64/mm/kasan_init.c */
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
/*
 * Temporary pgd: kasan_init() copies swapper_pg_dir here and switches
 * TTBR1 to it so the early shadow stays mapped while swapper's shadow
 * entries are torn down and rebuilt.
 */
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);
/*
 * Point every shadow PTE in [addr, end) at the single shared
 * kasan_zero_page, mapped with PAGE_KERNEL. If the pmd is still empty
 * it is first hooked up to the shared kasan_zero_pte table.
 *
 * The loop condition only advances while the *next* PTE is still empty
 * (pte_none), so the walk stops early on reaching an already-populated
 * entry; only the first PTE is written unconditionally.
 */
static void __init kasan_early_pte_populate(pmd_t *pmd, unsigned long addr,
					    unsigned long end)
{
	pte_t *pte;
	unsigned long next;

	if (pmd_none(*pmd))
		pmd_populate_kernel(&init_mm, pmd, kasan_zero_pte);

	pte = pte_offset_kernel(pmd, addr);
	do {
		next = addr + PAGE_SIZE;
		set_pte(pte, pfn_pte(virt_to_pfn(kasan_zero_page),
				     PAGE_KERNEL));
	} while (pte++, addr = next, addr != end && pte_none(*pte));
}
/*
 * Populate shadow PMD entries covering [addr, end), descending into
 * kasan_early_pte_populate() for each PMD-sized chunk. An empty pud is
 * first pointed at the shared kasan_zero_pmd table.
 *
 * As in the PTE walk, the loop stops early if the next pmd entry is
 * already populated (pmd_none check in the loop condition).
 */
static void __init kasan_early_pmd_populate(pud_t *pud,
					    unsigned long addr,
					    unsigned long end)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud))
		pud_populate(&init_mm, pud, kasan_zero_pmd);

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		kasan_early_pte_populate(pmd, addr, next);
	} while (pmd++, addr = next, addr != end && pmd_none(*pmd));
}
/*
 * Populate shadow PUD entries covering [addr, end), descending into
 * kasan_early_pmd_populate() for each PUD-sized chunk. An empty pgd is
 * first pointed at the shared kasan_zero_pud table.
 *
 * Stops early on an already-populated next pud entry, mirroring the
 * pmd/pte walks below it.
 */
static void __init kasan_early_pud_populate(pgd_t *pgd,
					    unsigned long addr,
					    unsigned long end)
{
	pud_t *pud;
	unsigned long next;

	if (pgd_none(*pgd))
		pgd_populate(&init_mm, pgd, kasan_zero_pud);

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		kasan_early_pmd_populate(pud, addr, next);
	} while (pud++, addr = next, addr != end && pud_none(*pud));
}
  64. static void __init kasan_map_early_shadow(void)
  65. {
  66. unsigned long addr = KASAN_SHADOW_START;
  67. unsigned long end = KASAN_SHADOW_END;
  68. unsigned long next;
  69. pgd_t *pgd;
  70. pgd = pgd_offset_k(addr);
  71. do {
  72. next = pgd_addr_end(addr, end);
  73. kasan_early_pud_populate(pgd, addr, next);
  74. } while (pgd++, addr = next, addr != end);
  75. }
/*
 * Early entry point: sanity-check the shadow layout at compile time,
 * then map the whole shadow region to the zero page so instrumented
 * code can run before real shadow memory exists.
 * NOTE(review): asmlinkage suggests this is invoked from early boot
 * assembly — confirm against the caller (head.S).
 */
asmlinkage void __init kasan_early_init(void)
{
	/*
	 * 1UL << 61 is presumably (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))
	 * with a scale shift of 3, tying the shadow offset to the 1/8th
	 * shadow scaling — TODO confirm against the arch headers.
	 */
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_END - (1UL << 61));
	/*
	 * Shadow start/end must be PGD-aligned: the early walk and
	 * clear_pgds() operate at whole-pgd granularity.
	 */
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_map_early_shadow();
}
  83. static void __init clear_pgds(unsigned long start,
  84. unsigned long end)
  85. {
  86. /*
  87. * Remove references to kasan page tables from
  88. * swapper_pg_dir. pgd_clear() can't be used
  89. * here because it's nop on 2,3-level pagetable setups
  90. */
  91. for (; start < end; start += PGDIR_SIZE)
  92. set_pgd(pgd_offset_k(start), __pgd(0));
  93. }
/*
 * Point TTBR1_EL1 (the kernel-space translation table base) at the
 * given physical address. The trailing isb synchronizes the context
 * change; no TLB maintenance is performed here — the caller is
 * responsible for flushing (kasan_init() calls flush_tlb_all()).
 */
static void __init cpu_set_ttbr1(unsigned long ttbr1)
{
	asm(
	" msr ttbr1_el1, %0\n"
	" isb"
	:
	: "r" (ttbr1));
}
/*
 * Main KASAN setup: replace the early zero-page shadow with properly
 * backed shadow memory covering all of RAM, then re-enable error
 * reporting by clearing init_task.kasan_depth.
 */
void __init kasan_init(void)
{
	struct memblock_region *reg;

	/*
	 * We are going to perform proper setup of shadow memory.
	 * At first we should unmap early shadow (clear_pgds() call below).
	 * However, instrumented code couldn't execute without shadow memory.
	 * tmp_pg_dir used to keep early shadow mapped until full shadow
	 * setup will be finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	cpu_set_ttbr1(__pa(tmp_pg_dir));
	flush_tlb_all();

	/* Running on tmp_pg_dir now, so swapper's shadow can be torn down. */
	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	/* Zero shadow for the shadow region below the module area. */
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
				   kasan_mem_to_shadow((void *)MODULES_VADDR));

	/* Real, vmemmap-backed shadow for every memblock of RAM. */
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		/*
		 * end + 1 here is intentional. We check several shadow bytes in
		 * advance to slightly speed up fastpath. In some rare cases
		 * we could cross boundary of mapped shadow, so we just map
		 * some more here.
		 */
		vmemmap_populate((unsigned long)kasan_mem_to_shadow(start),
				 (unsigned long)kasan_mem_to_shadow(end) + 1,
				 pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * Scrub the zero page — presumably dirtied while the early shadow
	 * was mapped writable (PAGE_KERNEL) — before it serves as clean
	 * shadow again. TODO confirm the dirtying path.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);

	/* Switch back to the now fully populated swapper tables. */
	cpu_set_ttbr1(__pa(swapper_pg_dir));
	flush_tlb_all();

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}