slub_def.h

#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
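
/*
 * Illustrative sketch, not part of the original header: with
 * CONFIG_SLUB_STATS enabled, each counter above is exported as a per-cache
 * sysfs file named after the lower-cased enum value, e.g.
 * /sys/kernel/slab/<cache>/alloc_fastpath. A minimal userspace reader;
 * the cache name "kmalloc-64" is an assumption about the running system:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char buf[256];
 *		FILE *f = fopen("/sys/kernel/slab/kmalloc-64/alloc_fastpath", "r");
 *
 *		if (!f)
 *			return 1;
 *		if (fgets(buf, sizeof(buf), f))
 *			printf("alloc_fastpath: %s", buf);
 *		fclose(f);
 *		return 0;
 *	}
 */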

struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
	struct page *partial;	/* Partially allocated frozen slabs */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
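
/*
 * Illustrative sketch, condensed from the mm/slub.c fastpath: freelist and
 * tid are updated together with this_cpu_cmpxchg_double(), so an allocation
 * can pop the first free object without taking a lock. tid advances on
 * every operation and differs per cpu, so a stale freelist read or a cpu
 * migration makes the cmpxchg fail (counted as CMPXCHG_DOUBLE_CPU_FAIL)
 * and the operation retries:
 *
 *	do {
 *		tid = c->tid;
 *		object = c->freelist;
 *	} while (!this_cpu_cmpxchg_double(s->cpu_slab->freelist,
 *					  s->cpu_slab->tid,
 *					  object, tid,
 *					  get_freepointer(s, object),
 *					  next_tid(tid)));
 */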

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned long x;
};
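
/*
 * Illustrative sketch of how mm/slub.c decodes this word (OO_SHIFT is 16
 * there): the page order lives in the high bits of x, the object count in
 * the low bits. The helpers are renamed here to avoid clashing with the
 * originals:
 */
#define OO_EXAMPLE_SHIFT	16
#define OO_EXAMPLE_MASK		((1 << OO_EXAMPLE_SHIFT) - 1)

static inline int oo_example_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_EXAMPLE_SHIFT;		/* page order from high bits */
}

static inline int oo_example_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_EXAMPLE_MASK;		/* object count from low bits */
}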

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs etc */
	unsigned long flags;
	unsigned long min_partial;
	int size;		/* The size of an object including metadata */
	int object_size;	/* The size of an object without metadata */
	int offset;		/* Free pointer offset. */
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	int inuse;		/* Offset to metadata */
	int align;		/* Alignment */
	int reserved;		/* Reserved bytes at the end of slabs */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
#endif
#ifdef CONFIG_MEMCG_KMEM
	struct memcg_cache_params memcg_params;
	int max_attr_size;	/* for propagation, maximum size of a stored attr */
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	int remote_node_defrag_ratio;
#endif
	struct kmem_cache_node *node[MAX_NUMNODES];
};
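
/*
 * Illustrative sketch: callers never fill in struct kmem_cache by hand;
 * kmem_cache_create() derives size, oo, min, max and friends from the
 * requested object size, alignment, flags and constructor. A hedged
 * example with a hypothetical struct foo:
 *
 *	#include <linux/slab.h>
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 */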

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_remove(struct kmem_cache *);
#else
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
}
#endif

/**
 * virt_to_obj - returns address of the beginning of object.
 * @s: object's kmem_cache
 * @slab_page: address of slab page
 * @x: address within object memory range
 *
 * Returns address of the beginning of object
 */
static inline void *virt_to_obj(struct kmem_cache *s,
				const void *slab_page,
				const void *x)
{
	return (void *)x - ((x - slab_page) % s->size);
}
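
/*
 * Illustrative sketch: s->size is the full per-object stride (object plus
 * metadata), so subtracting the offset modulo s->size rounds any interior
 * pointer down to the base of its enclosing object. For example, an error
 * reporter can recover the object from an arbitrary address inside a slab
 * page:
 *
 *	struct page *page = virt_to_head_page(addr);
 *	void *object = virt_to_obj(s, page_address(page), addr);
 */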

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

#endif /* _LINUX_SLUB_DEF_H */