/*
 * drivers/cpufreq/cpufreq_conservative.c
 *
 * Copyright (C) 2001 Russell King
 * (C) 2003 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * Jun Nakajima <jun.nakajima@intel.com>
 * (C) 2009 Alexander Clouter <alex@digriz.org.uk>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  13. #include <linux/slab.h>
  14. #include "cpufreq_governor.h"
/* Conservative governor macros */
#define DEF_FREQUENCY_UP_THRESHOLD	(80)	/* load %: step freq up above this */
#define DEF_FREQUENCY_DOWN_THRESHOLD	(20)	/* load %: step freq down below this */
#define DEF_FREQUENCY_STEP		(5)	/* step size as % of policy->max */
#define DEF_SAMPLING_DOWN_FACTOR	(1)	/* samples to skip before stepping down */
#define MAX_SAMPLING_DOWN_FACTOR	(10)	/* sysfs upper bound for the above */
/* Per-CPU governor state (tracked requested_freq, down_skip counter, ...). */
static DEFINE_PER_CPU(struct cs_cpu_dbs_info_s, cs_cpu_dbs_info);

static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event);

/*
 * The governor object is non-static only when conservative is the default
 * governor, so the cpufreq core can reference it at boot.
 */
#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
static
#endif
struct cpufreq_governor cpufreq_gov_conservative = {
	.name			= "conservative",
	.governor		= cs_cpufreq_governor_dbs,
	.max_transition_latency	= TRANSITION_LATENCY_LIMIT,
	.owner			= THIS_MODULE,
};
  33. static inline unsigned int get_freq_target(struct cs_dbs_tuners *cs_tuners,
  34. struct cpufreq_policy *policy)
  35. {
  36. unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100;
  37. /* max freq cannot be less than 100. But who knows... */
  38. if (unlikely(freq_target == 0))
  39. freq_target = DEF_FREQUENCY_STEP;
  40. return freq_target;
  41. }
  42. /*
  43. * Every sampling_rate, we check, if current idle time is less than 20%
  44. * (default), then we try to increase frequency. Every sampling_rate *
  45. * sampling_down_factor, we check, if current idle time is more than 80%
  46. * (default), then we try to decrease frequency
  47. *
  48. * Any frequency increase takes it to the maximum frequency. Frequency reduction
  49. * happens at minimum steps of 5% (default) of maximum frequency
  50. */
  51. static void cs_check_cpu(int cpu, unsigned int load)
  52. {
  53. struct cs_cpu_dbs_info_s *dbs_info = &per_cpu(cs_cpu_dbs_info, cpu);
  54. struct cpufreq_policy *policy = dbs_info->cdbs.shared->policy;
  55. struct dbs_data *dbs_data = policy->governor_data;
  56. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  57. /*
  58. * break out if we 'cannot' reduce the speed as the user might
  59. * want freq_step to be zero
  60. */
  61. if (cs_tuners->freq_step == 0)
  62. return;
  63. /* Check for frequency increase */
  64. if (load > cs_tuners->up_threshold) {
  65. dbs_info->down_skip = 0;
  66. /* if we are already at full speed then break out early */
  67. if (dbs_info->requested_freq == policy->max)
  68. return;
  69. dbs_info->requested_freq += get_freq_target(cs_tuners, policy);
  70. if (dbs_info->requested_freq > policy->max)
  71. dbs_info->requested_freq = policy->max;
  72. __cpufreq_driver_target(policy, dbs_info->requested_freq,
  73. CPUFREQ_RELATION_H);
  74. return;
  75. }
  76. /* if sampling_down_factor is active break out early */
  77. if (++dbs_info->down_skip < cs_tuners->sampling_down_factor)
  78. return;
  79. dbs_info->down_skip = 0;
  80. /* Check for frequency decrease */
  81. if (load < cs_tuners->down_threshold) {
  82. unsigned int freq_target;
  83. /*
  84. * if we cannot reduce the frequency anymore, break out early
  85. */
  86. if (policy->cur == policy->min)
  87. return;
  88. freq_target = get_freq_target(cs_tuners, policy);
  89. if (dbs_info->requested_freq > freq_target)
  90. dbs_info->requested_freq -= freq_target;
  91. else
  92. dbs_info->requested_freq = policy->min;
  93. __cpufreq_driver_target(policy, dbs_info->requested_freq,
  94. CPUFREQ_RELATION_L);
  95. return;
  96. }
  97. }
  98. static unsigned int cs_dbs_timer(struct cpu_dbs_info *cdbs,
  99. struct dbs_data *dbs_data, bool modify_all)
  100. {
  101. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  102. if (modify_all)
  103. dbs_check_cpu(dbs_data, cdbs->shared->policy->cpu);
  104. return delay_for_sampling_rate(cs_tuners->sampling_rate);
  105. }
  106. static int dbs_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
  107. void *data)
  108. {
  109. struct cpufreq_freqs *freq = data;
  110. struct cs_cpu_dbs_info_s *dbs_info =
  111. &per_cpu(cs_cpu_dbs_info, freq->cpu);
  112. struct cpufreq_policy *policy = cpufreq_cpu_get_raw(freq->cpu);
  113. if (!policy)
  114. return 0;
  115. /* policy isn't governed by conservative governor */
  116. if (policy->governor != &cpufreq_gov_conservative)
  117. return 0;
  118. /*
  119. * we only care if our internally tracked freq moves outside the 'valid'
  120. * ranges of frequency available to us otherwise we do not change it
  121. */
  122. if (dbs_info->requested_freq > policy->max
  123. || dbs_info->requested_freq < policy->min)
  124. dbs_info->requested_freq = freq->new;
  125. return 0;
  126. }
/* Hooked into CPUFREQ_TRANSITION_NOTIFIER by cs_init(). */
static struct notifier_block cs_cpufreq_notifier_block = {
	.notifier_call = dbs_cpufreq_notifier,
};

/************************** sysfs interface ************************/

/*
 * Forward declaration; presumably referenced by the gov_sys show/store
 * wrappers expanded below — confirm against cpufreq_governor.h.
 */
static struct common_dbs_data cs_dbs_cdata;
  132. static ssize_t store_sampling_down_factor(struct dbs_data *dbs_data,
  133. const char *buf, size_t count)
  134. {
  135. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  136. unsigned int input;
  137. int ret;
  138. ret = sscanf(buf, "%u", &input);
  139. if (ret != 1 || input > MAX_SAMPLING_DOWN_FACTOR || input < 1)
  140. return -EINVAL;
  141. cs_tuners->sampling_down_factor = input;
  142. return count;
  143. }
  144. static ssize_t store_sampling_rate(struct dbs_data *dbs_data, const char *buf,
  145. size_t count)
  146. {
  147. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  148. unsigned int input;
  149. int ret;
  150. ret = sscanf(buf, "%u", &input);
  151. if (ret != 1)
  152. return -EINVAL;
  153. cs_tuners->sampling_rate = max(input, dbs_data->min_sampling_rate);
  154. return count;
  155. }
  156. static ssize_t store_up_threshold(struct dbs_data *dbs_data, const char *buf,
  157. size_t count)
  158. {
  159. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  160. unsigned int input;
  161. int ret;
  162. ret = sscanf(buf, "%u", &input);
  163. if (ret != 1 || input > 100 || input <= cs_tuners->down_threshold)
  164. return -EINVAL;
  165. cs_tuners->up_threshold = input;
  166. return count;
  167. }
  168. static ssize_t store_down_threshold(struct dbs_data *dbs_data, const char *buf,
  169. size_t count)
  170. {
  171. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  172. unsigned int input;
  173. int ret;
  174. ret = sscanf(buf, "%u", &input);
  175. /* cannot be lower than 1 otherwise freq will not fall */
  176. if (ret != 1 || input < 1 || input > 100 ||
  177. input >= cs_tuners->up_threshold)
  178. return -EINVAL;
  179. cs_tuners->down_threshold = input;
  180. return count;
  181. }
  182. static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
  183. const char *buf, size_t count)
  184. {
  185. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  186. unsigned int input, j;
  187. int ret;
  188. ret = sscanf(buf, "%u", &input);
  189. if (ret != 1)
  190. return -EINVAL;
  191. if (input > 1)
  192. input = 1;
  193. if (input == cs_tuners->ignore_nice_load) /* nothing to do */
  194. return count;
  195. cs_tuners->ignore_nice_load = input;
  196. /* we need to re-evaluate prev_cpu_idle */
  197. for_each_online_cpu(j) {
  198. struct cs_cpu_dbs_info_s *dbs_info;
  199. dbs_info = &per_cpu(cs_cpu_dbs_info, j);
  200. dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
  201. &dbs_info->cdbs.prev_cpu_wall, 0);
  202. if (cs_tuners->ignore_nice_load)
  203. dbs_info->cdbs.prev_cpu_nice =
  204. kcpustat_cpu(j).cpustat[CPUTIME_NICE];
  205. }
  206. return count;
  207. }
  208. static ssize_t store_freq_step(struct dbs_data *dbs_data, const char *buf,
  209. size_t count)
  210. {
  211. struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
  212. unsigned int input;
  213. int ret;
  214. ret = sscanf(buf, "%u", &input);
  215. if (ret != 1)
  216. return -EINVAL;
  217. if (input > 100)
  218. input = 100;
  219. /*
  220. * no need to test here if freq_step is zero as the user might actually
  221. * want this, they would be crazy though :)
  222. */
  223. cs_tuners->freq_step = input;
  224. return count;
  225. }
/* Expand show/store handlers for each tunable (global and per-policy). */
show_store_one(cs, sampling_rate);
show_store_one(cs, sampling_down_factor);
show_store_one(cs, up_threshold);
show_store_one(cs, down_threshold);
show_store_one(cs, ignore_nice_load);
show_store_one(cs, freq_step);
declare_show_sampling_rate_min(cs);

/* Expand the sysfs attribute objects the tables below reference. */
gov_sys_pol_attr_rw(sampling_rate);
gov_sys_pol_attr_rw(sampling_down_factor);
gov_sys_pol_attr_rw(up_threshold);
gov_sys_pol_attr_rw(down_threshold);
gov_sys_pol_attr_rw(ignore_nice_load);
gov_sys_pol_attr_rw(freq_step);
gov_sys_pol_attr_ro(sampling_rate_min);
/* System-wide tunables (one governor instance for all policies). */
static struct attribute *dbs_attributes_gov_sys[] = {
	&sampling_rate_min_gov_sys.attr,
	&sampling_rate_gov_sys.attr,
	&sampling_down_factor_gov_sys.attr,
	&up_threshold_gov_sys.attr,
	&down_threshold_gov_sys.attr,
	&ignore_nice_load_gov_sys.attr,
	&freq_step_gov_sys.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_sys = {
	.attrs = dbs_attributes_gov_sys,
	.name = "conservative",
};
/* Per-policy tunables (one governor instance per cpufreq policy). */
static struct attribute *dbs_attributes_gov_pol[] = {
	&sampling_rate_min_gov_pol.attr,
	&sampling_rate_gov_pol.attr,
	&sampling_down_factor_gov_pol.attr,
	&up_threshold_gov_pol.attr,
	&down_threshold_gov_pol.attr,
	&ignore_nice_load_gov_pol.attr,
	&freq_step_gov_pol.attr,
	NULL
};

static struct attribute_group cs_attr_group_gov_pol = {
	.attrs = dbs_attributes_gov_pol,
	.name = "conservative",
};

/************************** sysfs end ************************/
  269. static int cs_init(struct dbs_data *dbs_data, bool notify)
  270. {
  271. struct cs_dbs_tuners *tuners;
  272. tuners = kzalloc(sizeof(*tuners), GFP_KERNEL);
  273. if (!tuners) {
  274. pr_err("%s: kzalloc failed\n", __func__);
  275. return -ENOMEM;
  276. }
  277. tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
  278. tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
  279. tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
  280. tuners->ignore_nice_load = 0;
  281. tuners->freq_step = DEF_FREQUENCY_STEP;
  282. dbs_data->tuners = tuners;
  283. dbs_data->min_sampling_rate = MIN_SAMPLING_RATE_RATIO *
  284. jiffies_to_usecs(10);
  285. if (notify)
  286. cpufreq_register_notifier(&cs_cpufreq_notifier_block,
  287. CPUFREQ_TRANSITION_NOTIFIER);
  288. return 0;
  289. }
/*
 * cs_exit - tear down what cs_init() set up: unregister the transition
 * notifier (when @notify) and free the tunables.
 */
static void cs_exit(struct dbs_data *dbs_data, bool notify)
{
	if (notify)
		cpufreq_unregister_notifier(&cs_cpufreq_notifier_block,
					    CPUFREQ_TRANSITION_NOTIFIER);

	kfree(dbs_data->tuners);
}
/* Expand get_cpu_cdbs()/get_cpu_dbs_info_s() accessors for our per-CPU data. */
define_get_cpu_dbs_routines(cs_cpu_dbs_info);

/* Glue handed to the common governor core (cpufreq_governor_dbs()). */
static struct common_dbs_data cs_dbs_cdata = {
	.governor = GOV_CONSERVATIVE,
	.attr_group_gov_sys = &cs_attr_group_gov_sys,
	.attr_group_gov_pol = &cs_attr_group_gov_pol,
	.get_cpu_cdbs = get_cpu_cdbs,
	.get_cpu_dbs_info_s = get_cpu_dbs_info_s,
	.gov_dbs_timer = cs_dbs_timer,
	.gov_check_cpu = cs_check_cpu,
	.init = cs_init,
	.exit = cs_exit,
	.mutex = __MUTEX_INITIALIZER(cs_dbs_cdata.mutex),
};
/* Governor entry point: delegate every event to the common governor core. */
static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy,
		unsigned int event)
{
	return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event);
}
/* Module init: register the conservative governor with the cpufreq core. */
static int __init cpufreq_gov_dbs_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_conservative);
}

/* Module exit: unregister the governor. */
static void __exit cpufreq_gov_dbs_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_conservative);
}
MODULE_AUTHOR("Alexander Clouter <alex@digriz.org.uk>");
MODULE_DESCRIPTION("'cpufreq_conservative' - A dynamic cpufreq governor for "
		"Low Latency Frequency Transition capable processors "
		"optimised for use in a battery environment");
MODULE_LICENSE("GPL");

/*
 * When conservative is the default governor, register it early
 * (fs_initcall) so it is available when drivers bring up cpufreq.
 */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE
fs_initcall(cpufreq_gov_dbs_init);
#else
module_init(cpufreq_gov_dbs_init);
#endif
module_exit(cpufreq_gov_dbs_exit);