/*
 * include/linux/cpu.h - generic cpu definition
 *
 * This is mainly for topological representation. We define the
 * basic 'struct cpu' here, which can be embedded in per-arch
 * definitions of processors.
 *
 * Basic handling of the devices is done in drivers/base/cpu.c
 *
 * CPUs are exported via sysfs in the devices/system/cpu
 * directory.
 */
#ifndef _LINUX_CPU_H_
#define _LINUX_CPU_H_

#include <linux/node.h>
#include <linux/compiler.h>
#include <linux/cpumask.h>

struct device;
struct device_node;
struct attribute_group;

struct cpu {
        int node_id;            /* The node which contains the CPU */
        int hotpluggable;       /* creates sysfs control file if hotpluggable */
        struct device dev;
};

extern int register_cpu(struct cpu *cpu, int num);
extern struct device *get_cpu_device(unsigned cpu);
extern bool cpu_is_hotpluggable(unsigned cpu);
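/*
 * Example (illustrative sketch, not part of this header): an architecture
 * typically embeds 'struct cpu' in per-CPU (or per-arch) storage and
 * registers each present processor at boot.  The per-CPU array and the
 * initcall below are assumptions for illustration; DEFINE_PER_CPU comes
 * from <linux/percpu.h> and subsys_initcall() from <linux/init.h>.
 *
 *      static DEFINE_PER_CPU(struct cpu, cpu_devices);
 *
 *      static int __init example_topology_init(void)
 *      {
 *              int i;
 *
 *              for_each_present_cpu(i) {
 *                      struct cpu *c = &per_cpu(cpu_devices, i);
 *
 *                      c->hotpluggable = 1;    // expose the online control file
 *                      register_cpu(c, i);
 *              }
 *              return 0;
 *      }
 *      subsys_initcall(example_topology_init);
 */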
extern bool arch_match_cpu_phys_id(int cpu, u64 phys_id);
extern bool arch_find_n_match_cpu_physical_id(struct device_node *cpun,
                                              int cpu, unsigned int *thread);

extern int cpu_add_dev_attr(struct device_attribute *attr);
extern void cpu_remove_dev_attr(struct device_attribute *attr);
extern int cpu_add_dev_attr_group(struct attribute_group *attrs);
extern void cpu_remove_dev_attr_group(struct attribute_group *attrs);
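/*
 * Example (sketch): as the names suggest, cpu_add_dev_attr() adds a single
 * device attribute to each CPU's sysfs device, so one DEVICE_ATTR shows up
 * under every /sys/devices/system/cpu/cpuN/ directory.  The 'example_info'
 * attribute is hypothetical; DEVICE_ATTR_RO() comes from <linux/device.h>,
 * and dev->id is the CPU number set by register_cpu().
 *
 *      static ssize_t example_info_show(struct device *dev,
 *                                       struct device_attribute *attr,
 *                                       char *buf)
 *      {
 *              return sprintf(buf, "cpu%d\n", dev->id);
 *      }
 *      static DEVICE_ATTR_RO(example_info);
 *
 *      // from an initcall:     cpu_add_dev_attr(&dev_attr_example_info);
 *      // and on teardown:      cpu_remove_dev_attr(&dev_attr_example_info);
 */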
extern ssize_t cpu_show_meltdown(struct device *dev,
                                 struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v1(struct device *dev,
                                   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spectre_v2(struct device *dev,
                                   struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
                                          struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_l1tf(struct device *dev,
                             struct device_attribute *attr, char *buf);
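/*
 * Example (sketch): these back the files under
 * /sys/devices/system/cpu/vulnerabilities/.  Weak generic defaults report
 * "Not affected"; an affected architecture overrides the ones it knows
 * about.  cpu_affected_by_meltdown() below is a hypothetical helper standing
 * in for the arch's own bug detection, and the mitigation string is only an
 * example.
 *
 *      ssize_t cpu_show_meltdown(struct device *dev,
 *                                struct device_attribute *attr, char *buf)
 *      {
 *              if (!cpu_affected_by_meltdown())
 *                      return sprintf(buf, "Not affected\n");
 *              return sprintf(buf, "Mitigation: PTI\n");
 *      }
 */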
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
                                 const struct attribute_group **groups,
                                 const char *fmt, ...);
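/*
 * Example (sketch): cpu_device_create() registers a child device beneath a
 * CPU's device, e.g. to hang extra attribute groups off
 * /sys/devices/system/cpu/cpuN/.  'example_groups' is a hypothetical
 * NULL-terminated array of attribute groups, and the error convention is
 * assumed for this sketch.
 *
 *      struct device *dev;
 *
 *      dev = cpu_device_create(get_cpu_device(cpu), NULL, example_groups,
 *                              "example%u", cpu);
 *      if (!dev)
 *              return -ENOMEM;
 */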
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
extern ssize_t arch_cpu_release(const char *, size_t);
#endif

struct notifier_block;

/*
 * CPU notifier priorities.
 */
enum {
        /*
         * SCHED_ACTIVE marks a cpu which is coming up active during
         * CPU_ONLINE and CPU_DOWN_FAILED and must be the first
         * notifier.  CPUSET_ACTIVE adjusts cpuset according to
         * cpu_active mask right after SCHED_ACTIVE.  During
         * CPU_DOWN_PREPARE, SCHED_INACTIVE and CPUSET_INACTIVE are
         * ordered in a similar way.
         *
         * This ordering guarantees consistent cpu_active mask and
         * migration behavior to all cpu notifiers.
         */
        CPU_PRI_SCHED_ACTIVE    = INT_MAX,
        CPU_PRI_CPUSET_ACTIVE   = INT_MAX - 1,
        CPU_PRI_SCHED_INACTIVE  = INT_MIN + 1,
        CPU_PRI_CPUSET_INACTIVE = INT_MIN,

        /* migration should happen before other stuff but after perf */
        CPU_PRI_PERF            = 20,
        CPU_PRI_MIGRATION       = 10,
        CPU_PRI_SMPBOOT         = 9,

        /* bring up workqueues before normal notifiers and down after */
        CPU_PRI_WORKQUEUE_UP    = 5,
        CPU_PRI_WORKQUEUE_DOWN  = -5,
};
#define CPU_ONLINE              0x0002 /* CPU (unsigned)v is up */
#define CPU_UP_PREPARE          0x0003 /* CPU (unsigned)v coming up */
#define CPU_UP_CANCELED         0x0004 /* CPU (unsigned)v NOT coming up */
#define CPU_DOWN_PREPARE        0x0005 /* CPU (unsigned)v going down */
#define CPU_DOWN_FAILED         0x0006 /* CPU (unsigned)v NOT going down */
#define CPU_DEAD                0x0007 /* CPU (unsigned)v dead */
#define CPU_DYING               0x0008 /* CPU (unsigned)v not running any task,
                                        * not handling interrupts, soon dead.
                                        * Called on the dying cpu, interrupts
                                        * are already disabled. Must not
                                        * sleep, must not fail */
#define CPU_POST_DEAD           0x0009 /* CPU (unsigned)v dead, cpu_hotplug
                                        * lock is dropped */
#define CPU_STARTING            0x000A /* CPU (unsigned)v soon running.
                                        * Called on the new cpu, just before
                                        * enabling interrupts. Must not sleep,
                                        * must not fail */
#define CPU_DYING_IDLE          0x000B /* CPU (unsigned)v dying, reached
                                        * idle loop. */
#define CPU_BROKEN              0x000C /* CPU (unsigned)v did not die properly,
                                        * perhaps due to preemption. */

/* Used for CPU hotplug events occurring while tasks are frozen due to a
 * suspend operation in progress
 */
#define CPU_TASKS_FROZEN        0x0010

#define CPU_ONLINE_FROZEN       (CPU_ONLINE | CPU_TASKS_FROZEN)
#define CPU_UP_PREPARE_FROZEN   (CPU_UP_PREPARE | CPU_TASKS_FROZEN)
#define CPU_UP_CANCELED_FROZEN  (CPU_UP_CANCELED | CPU_TASKS_FROZEN)
#define CPU_DOWN_PREPARE_FROZEN (CPU_DOWN_PREPARE | CPU_TASKS_FROZEN)
#define CPU_DOWN_FAILED_FROZEN  (CPU_DOWN_FAILED | CPU_TASKS_FROZEN)
#define CPU_DEAD_FROZEN         (CPU_DEAD | CPU_TASKS_FROZEN)
#define CPU_DYING_FROZEN        (CPU_DYING | CPU_TASKS_FROZEN)
#define CPU_STARTING_FROZEN     (CPU_STARTING | CPU_TASKS_FROZEN)
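/*
 * Example (sketch): a hotplug notifier callback usually masks off
 * CPU_TASKS_FROZEN so that the _FROZEN variants are handled like the normal
 * events.  'example_cpu_callback' and its per-cpu setup/teardown steps are
 * hypothetical; NOTIFY_OK comes from <linux/notifier.h>.
 *
 *      static int example_cpu_callback(struct notifier_block *nb,
 *                                      unsigned long action, void *hcpu)
 *      {
 *              unsigned int cpu = (unsigned long)hcpu;
 *
 *              switch (action & ~CPU_TASKS_FROZEN) {
 *              case CPU_UP_PREPARE:
 *                      // allocate per-cpu state; may sleep, may fail
 *                      break;
 *              case CPU_ONLINE:
 *                      // start using the new cpu
 *                      break;
 *              case CPU_UP_CANCELED:
 *              case CPU_DEAD:
 *                      // undo CPU_UP_PREPARE / release per-cpu state
 *                      break;
 *              }
 *              return NOTIFY_OK;
 *      }
 */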
#ifdef CONFIG_SMP
/* Need to know about CPUs going up/down? */
#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
#define cpu_notifier(fn, pri) {                                 \
        static struct notifier_block fn##_nb =                  \
                { .notifier_call = fn, .priority = pri };       \
        register_cpu_notifier(&fn##_nb);                        \
}

#define __cpu_notifier(fn, pri) {                               \
        static struct notifier_block fn##_nb =                  \
                { .notifier_call = fn, .priority = pri };       \
        __register_cpu_notifier(&fn##_nb);                      \
}

extern int register_cpu_notifier(struct notifier_block *nb);
extern int __register_cpu_notifier(struct notifier_block *nb);
extern void unregister_cpu_notifier(struct notifier_block *nb);
extern void __unregister_cpu_notifier(struct notifier_block *nb);

#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */

#define cpu_notifier(fn, pri)   do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}
#endif

void smpboot_thread_init(void);
int cpu_up(unsigned int cpu);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);

#define cpu_notifier_register_begin     cpu_maps_update_begin
#define cpu_notifier_register_done      cpu_maps_update_done
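/*
 * Example (sketch): the race-free pattern is to initialize already-online
 * CPUs and register the notifier inside one
 * cpu_notifier_register_begin()/cpu_notifier_register_done() section, using
 * the __register variant, which is meant to be called with that section
 * held.  'example_cpu_nb', 'example_cpu_callback' and 'example_setup_cpu'
 * are hypothetical.
 *
 *      static struct notifier_block example_cpu_nb = {
 *              .notifier_call  = example_cpu_callback,
 *      };
 *
 *      static int __init example_init(void)
 *      {
 *              unsigned int cpu;
 *
 *              cpu_notifier_register_begin();
 *              for_each_online_cpu(cpu)
 *                      example_setup_cpu(cpu);
 *              __register_cpu_notifier(&example_cpu_nb);
 *              cpu_notifier_register_done();
 *
 *              return 0;
 *      }
 */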
#else   /* CONFIG_SMP */

#define cpu_notifier(fn, pri)   do { (void)(fn); } while (0)
#define __cpu_notifier(fn, pri) do { (void)(fn); } while (0)

static inline int register_cpu_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline int __register_cpu_notifier(struct notifier_block *nb)
{
        return 0;
}

static inline void unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void __unregister_cpu_notifier(struct notifier_block *nb)
{
}

static inline void cpu_maps_update_begin(void)
{
}

static inline void cpu_maps_update_done(void)
{
}

static inline void cpu_notifier_register_begin(void)
{
}

static inline void cpu_notifier_register_done(void)
{
}

static inline void smpboot_thread_init(void)
{
}

#endif /* CONFIG_SMP */

extern struct bus_type cpu_subsys;

#ifdef CONFIG_HOTPLUG_CPU
/* Stop CPUs going up and down. */
extern void cpu_hotplug_begin(void);
extern void cpu_hotplug_done(void);
extern void get_online_cpus(void);
extern void put_online_cpus(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
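/*
 * Example (sketch): get_online_cpus()/put_online_cpus() hold off CPU hotplug
 * so the set of online CPUs cannot change while it is being walked.
 * 'example_poke_cpu' is a hypothetical per-cpu operation.
 *
 *      unsigned int cpu;
 *
 *      get_online_cpus();
 *      for_each_online_cpu(cpu)
 *              example_poke_cpu(cpu);
 *      put_online_cpus();
 */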
#define hotcpu_notifier(fn, pri)        cpu_notifier(fn, pri)
#define __hotcpu_notifier(fn, pri)      __cpu_notifier(fn, pri)
#define register_hotcpu_notifier(nb)    register_cpu_notifier(nb)
#define __register_hotcpu_notifier(nb)  __register_cpu_notifier(nb)
#define unregister_hotcpu_notifier(nb)  unregister_cpu_notifier(nb)
#define __unregister_hotcpu_notifier(nb)        __unregister_cpu_notifier(nb)

void clear_tasks_mm_cpumask(int cpu);
int cpu_down(unsigned int cpu);

#else   /* CONFIG_HOTPLUG_CPU */

static inline void cpu_hotplug_begin(void) {}
static inline void cpu_hotplug_done(void) {}
#define get_online_cpus()       do { } while (0)
#define put_online_cpus()       do { } while (0)
#define cpu_hotplug_disable()   do { } while (0)
#define cpu_hotplug_enable()    do { } while (0)
#define hotcpu_notifier(fn, pri)        do { (void)(fn); } while (0)
#define __hotcpu_notifier(fn, pri)      do { (void)(fn); } while (0)
/* These aren't inline functions due to a GCC bug. */
#define register_hotcpu_notifier(nb)    ({ (void)(nb); 0; })
#define __register_hotcpu_notifier(nb)  ({ (void)(nb); 0; })
#define unregister_hotcpu_notifier(nb)  ({ (void)(nb); })
#define __unregister_hotcpu_notifier(nb)        ({ (void)(nb); })
#endif  /* CONFIG_HOTPLUG_CPU */

#ifdef CONFIG_PM_SLEEP_SMP
extern int disable_nonboot_cpus(void);
extern void enable_nonboot_cpus(void);
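/*
 * Example (sketch of the suspend-style usage): take all non-boot CPUs down,
 * do the work that must run on the boot CPU alone, then bring them back.
 *
 *      int err;
 *
 *      err = disable_nonboot_cpus();
 *      if (err)
 *              return err;
 *      // ... only the boot CPU is running here ...
 *      enable_nonboot_cpus();
 */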
#else /* !CONFIG_PM_SLEEP_SMP */
static inline int disable_nonboot_cpus(void) { return 0; }
static inline void enable_nonboot_cpus(void) {}
#endif /* !CONFIG_PM_SLEEP_SMP */

enum cpuhp_state {
        CPUHP_OFFLINE,
        CPUHP_ONLINE,
};

void cpu_startup_entry(enum cpuhp_state state);
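/*
 * Example (sketch of a typical secondary-CPU startup tail in arch code):
 * once a new CPU has announced itself and enabled interrupts, it enters the
 * generic idle loop and never returns.  'example_secondary_start' is a
 * hypothetical arch entry point.
 *
 *      void example_secondary_start(void)
 *      {
 *              unsigned int cpu = smp_processor_id();
 *
 *              notify_cpu_starting(cpu);
 *              set_cpu_online(cpu, true);
 *              local_irq_enable();
 *              cpu_startup_entry(CPUHP_ONLINE);
 *      }
 */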
void cpu_idle_poll_ctrl(bool enable);

void arch_cpu_idle(void);
void arch_cpu_idle_prepare(void);
void arch_cpu_idle_enter(void);
void arch_cpu_idle_exit(void);
void arch_cpu_idle_dead(void);

DECLARE_PER_CPU(bool, cpu_dead_idle);

int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
bool cpu_report_death(void);
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
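/*
 * Example (sketch): cpu_wait_death() and cpu_report_death() pair the CPU
 * tearing another one down with the CPU that is dying.  The surrounding arch
 * hooks and 'example_arch_halt_forever' are illustrative only.
 *
 *      // on the surviving CPU, e.g. from the arch's __cpu_die():
 *      if (!cpu_wait_death(cpu, 5))
 *              pr_err("CPU %u did not report death in time\n", cpu);
 *
 *      // on the dying CPU, from its play-dead/idle path:
 *      (void)cpu_report_death();
 *      example_arch_halt_forever();    // hypothetical; never returns
 */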
#endif /* _LINUX_CPU_H_ */