oom.h

#ifndef __INCLUDE_LINUX_OOM_H
#define __INCLUDE_LINUX_OOM_H

#include <linux/sched.h>
#include <linux/types.h>
#include <linux/nodemask.h>
#include <uapi/linux/oom.h>

struct zonelist;
struct notifier_block;
struct mem_cgroup;
struct task_struct;

/*
 * Details of the page allocation that triggered the oom killer that are used
 * to determine what should be killed.
 */
struct oom_control {
	/* Used to determine cpuset */
	struct zonelist *zonelist;

	/* Used to determine mempolicy */
	nodemask_t *nodemask;

	/* Used to determine cpuset and node locality requirement */
	const gfp_t gfp_mask;

	/*
	 * order == -1 means the oom kill is required by sysrq, otherwise only
	 * for display purposes.
	 */
	const int order;
};
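/*
 * Illustrative sketch (not part of this header): a caller on the slow
 * allocation path might fill in an oom_control from the failed request
 * before handing it to out_of_memory(), declared below. The local names
 * zonelist, nodemask, gfp_mask and order stand in for whatever the
 * caller derived from the allocation.
 *
 *	struct oom_control oc = {
 *		.zonelist = zonelist,
 *		.nodemask = nodemask,
 *		.gfp_mask = gfp_mask,
 *		.order = order,
 *	};
 *
 *	if (out_of_memory(&oc))
 *		... progress was made: a victim was killed or is exiting ...
 */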
/*
 * Types of limitations to the nodes from which allocations may occur
 */
enum oom_constraint {
	CONSTRAINT_NONE,
	CONSTRAINT_CPUSET,
	CONSTRAINT_MEMORY_POLICY,
	CONSTRAINT_MEMCG,
};

enum oom_scan_t {
	OOM_SCAN_OK,		/* scan thread and find its badness */
	OOM_SCAN_CONTINUE,	/* do not consider thread for oom kill */
	OOM_SCAN_ABORT,		/* abort the iteration and return */
	OOM_SCAN_SELECT,	/* always select this thread first */
};
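/*
 * Illustrative sketch (not part of this header): a victim-selection loop
 * might dispatch on oom_scan_process_thread(), declared below, roughly as
 * follows; the g, p, chosen and points locals are assumed for the example
 * and the scoring bookkeeping is elided.
 *
 *	for_each_process_thread(g, p) {
 *		switch (oom_scan_process_thread(oc, p, totalpages)) {
 *		case OOM_SCAN_SELECT:
 *			chosen = p;
 *			chosen_points = ULONG_MAX;
 *			(falls through)
 *		case OOM_SCAN_CONTINUE:
 *			continue;
 *		case OOM_SCAN_ABORT:
 *			return (struct task_struct *)(-1UL);
 *		case OOM_SCAN_OK:
 *			break;
 *		};
 *		points = oom_badness(p, NULL, oc->nodemask, totalpages);
 *		...
 *	}
 */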
/* Thread is the potential origin of an oom condition; kill first on oom */
#define OOM_FLAG_ORIGIN		((__force oom_flags_t)0x1)

extern struct mutex oom_lock;

static inline void set_current_oom_origin(void)
{
	current->signal->oom_flags |= OOM_FLAG_ORIGIN;
}

static inline void clear_current_oom_origin(void)
{
	current->signal->oom_flags &= ~OOM_FLAG_ORIGIN;
}

static inline bool oom_task_origin(const struct task_struct *p)
{
	return !!(p->signal->oom_flags & OOM_FLAG_ORIGIN);
}
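/*
 * Illustrative sketch (not part of this header): a task about to perform
 * an allocation-heavy operation can volunteer itself as the first OOM
 * target for the duration of the work, in the style of the swapoff path;
 * the err and type locals here belong to that caller.
 *
 *	set_current_oom_origin();
 *	err = try_to_unuse(type, false, 0);
 *	clear_current_oom_origin();
 */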
extern void mark_oom_victim(struct task_struct *tsk);

extern unsigned long oom_badness(struct task_struct *p,
		struct mem_cgroup *memcg, const nodemask_t *nodemask,
		unsigned long totalpages);

extern int oom_kills_count(void);
extern void note_oom_kill(void);

extern void oom_kill_process(struct oom_control *oc, struct task_struct *p,
			     unsigned int points, unsigned long totalpages,
			     struct mem_cgroup *memcg, const char *message);

extern void check_panic_on_oom(struct oom_control *oc,
			       enum oom_constraint constraint,
			       struct mem_cgroup *memcg);

extern enum oom_scan_t oom_scan_process_thread(struct oom_control *oc,
		struct task_struct *task, unsigned long totalpages);

extern bool out_of_memory(struct oom_control *oc);

extern void exit_oom_victim(void);

extern int register_oom_notifier(struct notifier_block *nb);
extern int unregister_oom_notifier(struct notifier_block *nb);
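/*
 * Illustrative sketch (not part of this header): an OOM notifier runs
 * early in the OOM path and can avert a kill by reporting how many pages
 * it freed through the pointer passed along the chain. The names
 * my_oom_notify, my_shrink_caches and my_oom_nb are assumed for the
 * example.
 *
 *	static int my_oom_notify(struct notifier_block *nb,
 *				 unsigned long unused, void *parm)
 *	{
 *		unsigned long *freed = parm;
 *
 *		*freed += my_shrink_caches();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_oom_nb = {
 *		.notifier_call = my_oom_notify,
 *	};
 *
 *	register_oom_notifier(&my_oom_nb);
 */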
extern bool oom_killer_disabled;
extern bool oom_killer_disable(void);
extern void oom_killer_enable(void);

extern struct task_struct *find_lock_task_mm(struct task_struct *p);
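/*
 * Illustrative sketch (not part of this header): find_lock_task_mm()
 * returns a thread of p that still has an mm, with its task lock held,
 * or NULL; the caller must drop that lock. The t local is assumed.
 *
 *	t = find_lock_task_mm(p);
 *	if (t) {
 *		... inspect t->mm ...
 *		task_unlock(t);
 *	}
 */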
static inline bool task_will_free_mem(struct task_struct *task)
{
	/*
	 * A coredumping process may sleep for an extended period in exit_mm(),
	 * so the oom killer cannot assume that the process will promptly exit
	 * and release memory.
	 */
	return (task->flags & PF_EXITING) &&
		!(task->signal->flags & SIGNAL_GROUP_COREDUMP);
}
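/*
 * Illustrative sketch (not part of this header): the OOM path can use
 * task_will_free_mem() to skip selecting a victim when the current task
 * is already on its way out, roughly:
 *
 *	if (current->mm &&
 *	    (fatal_signal_pending(current) || task_will_free_mem(current))) {
 *		mark_oom_victim(current);
 *		return true;
 *	}
 */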
/* sysctls */
extern int sysctl_oom_dump_tasks;
extern int sysctl_oom_kill_allocating_task;
extern int sysctl_panic_on_oom;

#endif /* __INCLUDE_LINUX_OOM_H */