/*
 * mm/interval_tree.c - interval tree for mapping->i_mmap
 *
 * Copyright (C) 2012, Michel Lespinasse <walken@google.com>
 *
 * This file is released under the GPL v2.
 */

#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/rmap.h>
#include <linux/interval_tree_generic.h>
/* First page offset covered by the VMA, in units of PAGE_SIZE */
static inline unsigned long vma_start_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff;
}

/* Last page offset covered by the VMA (inclusive) */
static inline unsigned long vma_last_pgoff(struct vm_area_struct *v)
{
        return v->vm_pgoff + ((v->vm_end - v->vm_start) >> PAGE_SHIFT) - 1;
}
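
/*
 * Worked example (illustrative): a VMA with vm_pgoff == 10 spanning four
 * pages (vm_end - vm_start == 4 << PAGE_SHIFT) covers the closed pgoff
 * interval [10, 13]: vma_start_pgoff() == 10 and
 * vma_last_pgoff() == 10 + 4 - 1 == 13.
 */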

/*
 * Generate vma_interval_tree_{insert,remove,iter_first,iter_next}().
 * The empty argument before the prefix leaves out "static", so the
 * generated functions get external linkage.
 */
INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
                     unsigned long, shared.rb_subtree_last,
                     vma_start_pgoff, vma_last_pgoff,, vma_interval_tree)
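
/*
 * Illustrative usage sketch: callers normally walk the tree through the
 * vma_interval_tree_foreach() macro from include/linux/mm.h, which wraps
 * the generated iter_first/iter_next pair. Here "first" and "last" are
 * pgoffs chosen by the caller and do_something() is a hypothetical callee:
 *
 *      struct vm_area_struct *vma;
 *
 *      vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
 *              do_something(vma);
 */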

/*
 * Insert node immediately after prev in the interval tree.
 *
 * Since node and prev share the same start pgoff (enforced below), node
 * belongs at prev's in-order successor position: either as prev's right
 * child, or as the leftmost node of prev's right subtree. The augmented
 * rb_subtree_last values are updated on the way down so the invariant
 * (each node holds the max "last" of its subtree) still holds.
 */
void vma_interval_tree_insert_after(struct vm_area_struct *node,
                                    struct vm_area_struct *prev,
                                    struct rb_root *root)
{
        struct rb_node **link;
        struct vm_area_struct *parent;
        unsigned long last = vma_last_pgoff(node);

        VM_BUG_ON_VMA(vma_start_pgoff(node) != vma_start_pgoff(prev), node);

        if (!prev->shared.rb.rb_right) {
                /* prev has no right child: node becomes that child */
                parent = prev;
                link = &prev->shared.rb.rb_right;
        } else {
                /* Descend to the leftmost node of prev's right subtree */
                parent = rb_entry(prev->shared.rb.rb_right,
                                  struct vm_area_struct, shared.rb);
                if (parent->shared.rb_subtree_last < last)
                        parent->shared.rb_subtree_last = last;
                while (parent->shared.rb.rb_left) {
                        parent = rb_entry(parent->shared.rb.rb_left,
                                          struct vm_area_struct, shared.rb);
                        if (parent->shared.rb_subtree_last < last)
                                parent->shared.rb_subtree_last = last;
                }
                link = &parent->shared.rb.rb_left;
        }

        node->shared.rb_subtree_last = last;
        rb_link_node(&node->shared.rb, &parent->shared.rb, link);
        rb_insert_augmented(&node->shared.rb, root,
                            &vma_interval_tree_augment);
}
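
/*
 * This suits callers like dup_mmap() (kernel/fork.c), which inserts the
 * child's copy of a file-backed VMA just after the parent's VMA; both
 * copies span the same file range, so the equal-start precondition holds
 * and no full tree search is needed.
 */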

/*
 * anon_vma_chain nodes are keyed by the pgoff range of the VMA they
 * link, so an overlap query on an anon_vma finds every VMA that might
 * map a given anonymous page.
 */
static inline unsigned long avc_start_pgoff(struct anon_vma_chain *avc)
{
        return vma_start_pgoff(avc->vma);
}

static inline unsigned long avc_last_pgoff(struct anon_vma_chain *avc)
{
        return vma_last_pgoff(avc->vma);
}

INTERVAL_TREE_DEFINE(struct anon_vma_chain, rb, unsigned long, rb_subtree_last,
                     avc_start_pgoff, avc_last_pgoff,
                     static inline, __anon_vma_interval_tree)
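
/*
 * The __anon_vma_interval_tree_*() functions generated above are
 * static inline, so the exported entry points below wrap them. The
 * insert wrapper also gives CONFIG_DEBUG_VM_RB a spot to snapshot the
 * interval bounds, so anon_vma_interval_tree_verify() can later check
 * that the VMA was not resized or moved while still linked into an
 * interval tree.
 */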
void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
                                   struct rb_root *root)
{
#ifdef CONFIG_DEBUG_VM_RB
        node->cached_vma_start = avc_start_pgoff(node);
        node->cached_vma_last = avc_last_pgoff(node);
#endif
        __anon_vma_interval_tree_insert(node, root);
}

void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
                                   struct rb_root *root)
{
        __anon_vma_interval_tree_remove(node, root);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_first(struct rb_root *root,
                                  unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_first(root, first, last);
}

struct anon_vma_chain *
anon_vma_interval_tree_iter_next(struct anon_vma_chain *node,
                                 unsigned long first, unsigned long last)
{
        return __anon_vma_interval_tree_iter_next(node, first, last);
}
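
/*
 * Illustrative sketch: rmap walkers typically drive the two iterators
 * above through the anon_vma_interval_tree_foreach() macro from
 * include/linux/rmap.h rather than calling them directly:
 *
 *      struct anon_vma_chain *avc;
 *
 *      anon_vma_interval_tree_foreach(avc, &anon_vma->rb_root,
 *                                     pgoff, pgoff) {
 *              struct vm_area_struct *vma = avc->vma;
 *              ... visit each VMA whose range covers pgoff ...
 *      }
 */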

#ifdef CONFIG_DEBUG_VM_RB
void anon_vma_interval_tree_verify(struct anon_vma_chain *node)
{
        WARN_ON_ONCE(node->cached_vma_start != avc_start_pgoff(node));
        WARN_ON_ONCE(node->cached_vma_last != avc_last_pgoff(node));
}
#endif