debug-pagealloc.c

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
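
/*
 * Set once at boot by init_page_poisoning() when debug_pagealloc is
 * enabled; __kernel_map_pages() below is a no-op while this is false.
 */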
static bool page_poisoning_enabled __read_mostly;

static bool need_page_poisoning(void)
{
	if (!debug_pagealloc_enabled())
		return false;

	return true;
}

static void init_page_poisoning(void)
{
	if (!debug_pagealloc_enabled())
		return;

	page_poisoning_enabled = true;
}

struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};
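
/*
 * Poison state is tracked per page in the PAGE_EXT_DEBUG_POISON bit of
 * the page's page_ext flags rather than in struct page itself.
 */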
static inline void set_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (!page_ext)
		return;

	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

static inline void clear_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (!page_ext)
		return;

	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

static inline bool page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (!page_ext)
		return false;

	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}
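
/*
 * Fill a page with the PAGE_POISON pattern through a temporary atomic
 * mapping so that highmem pages are covered as well.
 */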
static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	set_page_poison(page);
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}
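
/*
 * a ^ b has exactly one bit set when the two bytes differ by a single
 * bit flip: the XOR is non-zero and a power of two, which is what
 * error & (error - 1) == 0 tests.
 */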
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}
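
/*
 * memchr_inv() finds the first byte that no longer matches PAGE_POISON;
 * the loop below finds the last such byte so only the corrupted range is
 * hex-dumped. Reports are ratelimited to 10 per 5 seconds.
 */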
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		printk(KERN_ERR "pagealloc: single bit error\n");
	else
		printk(KERN_ERR "pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
		       end - start + 1, 1);

	dump_stack();
}
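
/*
 * On allocation, verify that the poison pattern survived while the page
 * was free, then clear the tracking bit again.
 */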
static void unpoison_page(struct page *page)
{
	void *addr;

	if (!page_poison(page))
		return;

	addr = kmap_atomic(page);
	check_poison_mem(addr, PAGE_SIZE);
	clear_page_poison(page);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}
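
/*
 * Hook invoked by the page allocator when debug_pagealloc is in effect:
 * enable != 0 means the pages are about to be handed out, so their
 * poison is checked and cleared; enable == 0 means they are being
 * freed, so they are poisoned.
 */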
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled)
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}