kmemcheck.c

#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			printk(KERN_ERR "kmemcheck: failed to allocate "
				"shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}
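
/*
 * Illustrative sketch (not part of the original file; the helper name is
 * hypothetical): once kmemcheck_alloc_shadow() has pointed each data page's
 * ->shadow at its shadow page, the shadow byte for an arbitrary tracked
 * address can be found by reusing the address' offset within its page.
 * A real lookup would also have to validate the address and its page
 * table entry first.
 */
#if 0
static void *example_shadow_lookup(unsigned long address)
{
	struct page *page = virt_to_page(address);

	if (!page->shadow)
		return NULL;	/* page is not tracked by kmemcheck */

	/* Same offset within the shadow page as within the data page. */
	return page->shadow + (address & (PAGE_SIZE - 1));
}
#endif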

void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	/*
	 * Has already been memset(), which initializes the shadow for us
	 * as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}
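
/*
 * Rough call-site sketch (the hook name is hypothetical, and using
 * ->object_size as the per-object size is an assumption): a slab
 * allocator would invoke kmemcheck_slab_alloc() right after handing out
 * an object, so the whole object gets marked initialized or
 * uninitialized as decided above.
 */
#if 0
static void example_post_alloc_hook(struct kmem_cache *s, gfp_t gfpflags,
				    void *object)
{
	kmemcheck_slab_alloc(s, gfpflags, object, s->object_size);
}
#endif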

void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}
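
/*
 * Matching sketch for the free side (illustrative only, same assumptions
 * as above): called as an object is released, so that later reads of
 * freed memory can be flagged.
 */
#if 0
static void example_pre_free_hook(struct kmem_cache *s, void *object)
{
	kmemcheck_slab_free(s, object, s->object_size);
}
#endif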

void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}
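
/*
 * Sketch only, prompted by the XXX above (not part of the original file;
 * the wrapper name is made up): rather than passing -1 ("any node"), the
 * node the data page already lives on could be derived from the page
 * itself, e.g. with page_to_nid(), so that the shadow pages end up on
 * the same NUMA node.
 */
#if 0
static void example_alloc_shadow_on_local_node(struct page *page,
					       unsigned int order,
					       gfp_t gfpflags)
{
	/* Allocate the shadow on the same NUMA node as the data page. */
	kmemcheck_alloc_shadow(page, order, gfpflags, page_to_nid(page));
}
#endif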