/* cpufreq_spudemand.c */
  1. /*
  2. * spu aware cpufreq governor for the cell processor
  3. *
  4. * © Copyright IBM Corporation 2006-2008
  5. *
  6. * Author: Christian Krafft <krafft@de.ibm.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License as published by
  10. * the Free Software Foundation; either version 2, or (at your option)
  11. * any later version.
  12. *
  13. * This program is distributed in the hope that it will be useful,
  14. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  16. * GNU General Public License for more details.
  17. *
  18. * You should have received a copy of the GNU General Public License
  19. * along with this program; if not, write to the Free Software
  20. * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  21. */
  22. #include <linux/cpufreq.h>
  23. #include <linux/sched.h>
  24. #include <linux/module.h>
  25. #include <linux/timer.h>
  26. #include <linux/workqueue.h>
  27. #include <linux/atomic.h>
  28. #include <asm/machdep.h>
  29. #include <asm/spu.h>
  30. #define POLL_TIME 100000 /* in µs */
  31. #define EXP 753 /* exp(-1) in fixed-point */
/*
 * Per-CPU governor state.  One instance exists for every possible CPU;
 * ->policy is non-NULL only while the governor is started on that CPU
 * (set in spu_gov_govern() on GOV_START, cleared on GOV_STOP).
 */
struct spu_gov_info_struct {
	unsigned long busy_spus; /* fixed-point exponential load average of busy SPUs */
	struct cpufreq_policy *policy; /* policy this CPU is governed by, or NULL */
	struct delayed_work work; /* periodic re-evaluation work */
	unsigned int poll_int; /* polling interval, µs */
};
static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info);
  39. static int calc_freq(struct spu_gov_info_struct *info)
  40. {
  41. int cpu;
  42. int busy_spus;
  43. cpu = info->policy->cpu;
  44. busy_spus = atomic_read(&cbe_spu_info[cpu_to_node(cpu)].busy_spus);
  45. CALC_LOAD(info->busy_spus, EXP, busy_spus * FIXED_1);
  46. pr_debug("cpu %d: busy_spus=%d, info->busy_spus=%ld\n",
  47. cpu, busy_spus, info->busy_spus);
  48. return info->policy->max * info->busy_spus / FIXED_1;
  49. }
  50. static void spu_gov_work(struct work_struct *work)
  51. {
  52. struct spu_gov_info_struct *info;
  53. int delay;
  54. unsigned long target_freq;
  55. info = container_of(work, struct spu_gov_info_struct, work.work);
  56. /* after cancel_delayed_work_sync we unset info->policy */
  57. BUG_ON(info->policy == NULL);
  58. target_freq = calc_freq(info);
  59. __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H);
  60. delay = usecs_to_jiffies(info->poll_int);
  61. schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
  62. }
  63. static void spu_gov_init_work(struct spu_gov_info_struct *info)
  64. {
  65. int delay = usecs_to_jiffies(info->poll_int);
  66. INIT_DEFERRABLE_WORK(&info->work, spu_gov_work);
  67. schedule_delayed_work_on(info->policy->cpu, &info->work, delay);
  68. }
/* Stop the periodic work and wait for any running instance to finish. */
static void spu_gov_cancel_work(struct spu_gov_info_struct *info)
{
	cancel_delayed_work_sync(&info->work);
}
  73. static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event)
  74. {
  75. unsigned int cpu = policy->cpu;
  76. struct spu_gov_info_struct *info, *affected_info;
  77. int i;
  78. int ret = 0;
  79. info = &per_cpu(spu_gov_info, cpu);
  80. switch (event) {
  81. case CPUFREQ_GOV_START:
  82. if (!cpu_online(cpu)) {
  83. printk(KERN_ERR "cpu %d is not online\n", cpu);
  84. ret = -EINVAL;
  85. break;
  86. }
  87. if (!policy->cur) {
  88. printk(KERN_ERR "no cpu specified in policy\n");
  89. ret = -EINVAL;
  90. break;
  91. }
  92. /* initialize spu_gov_info for all affected cpus */
  93. for_each_cpu(i, policy->cpus) {
  94. affected_info = &per_cpu(spu_gov_info, i);
  95. affected_info->policy = policy;
  96. }
  97. info->poll_int = POLL_TIME;
  98. /* setup timer */
  99. spu_gov_init_work(info);
  100. break;
  101. case CPUFREQ_GOV_STOP:
  102. /* cancel timer */
  103. spu_gov_cancel_work(info);
  104. /* clean spu_gov_info for all affected cpus */
  105. for_each_cpu (i, policy->cpus) {
  106. info = &per_cpu(spu_gov_info, i);
  107. info->policy = NULL;
  108. }
  109. break;
  110. }
  111. return ret;
  112. }
/* Governor descriptor registered with the cpufreq core. */
static struct cpufreq_governor spu_governor = {
	.name = "spudemand",
	.governor = spu_gov_govern,
	.owner = THIS_MODULE,
};
/*
 * module init and destroy
 */
  121. static int __init spu_gov_init(void)
  122. {
  123. int ret;
  124. ret = cpufreq_register_governor(&spu_governor);
  125. if (ret)
  126. printk(KERN_ERR "registration of governor failed\n");
  127. return ret;
  128. }
/* Module exit point: unregister the governor from cpufreq. */
static void __exit spu_gov_exit(void)
{
	cpufreq_unregister_governor(&spu_governor);
}

module_init(spu_gov_init);
module_exit(spu_gov_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Christian Krafft <krafft@de.ibm.com>");