
/*
 * cbe_regs.c
 *
 * Accessor routines for the various MMIO register blocks of the CBE
 *
 * (c) 2006 Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp.
 */

#include <linux/percpu.h>
#include <linux/types.h>
#include <linux/export.h>
#include <linux/of_device.h>
#include <linux/of_platform.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/cell-regs.h>
/*
 * The current implementation uses "cpu" nodes. We build our own mapping
 * array of cpu numbers to cpu nodes locally for now to allow interrupt
 * time code to have a fast path rather than call of_get_cpu_node(). If
 * we implement cpu hotplug, we'll have to install an appropriate notifier
 * in order to release references to the cpu going away.
 */
static struct cbe_regs_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_pmd_regs __iomem *pmd_regs;
	struct cbe_iic_regs __iomem *iic_regs;
	struct cbe_mic_tm_regs __iomem *mic_tm_regs;
	struct cbe_pmd_shadow_regs pmd_shadow_regs;
} cbe_regs_maps[MAX_CBE];
static int cbe_regs_map_count;

static struct cbe_thread_map
{
	struct device_node *cpu_node;
	struct device_node *be_node;
	struct cbe_regs_map *regs;
	unsigned int thread_id;
	unsigned int cbe_id;
} cbe_thread_map[NR_CPUS];

static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} };
static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE };
static struct cbe_regs_map *cbe_find_map(struct device_node *np)
{
	int i;
	struct device_node *tmp_np;

	if (strcasecmp(np->type, "spe")) {
		for (i = 0; i < cbe_regs_map_count; i++)
			if (cbe_regs_maps[i].cpu_node == np ||
			    cbe_regs_maps[i].be_node == np)
				return &cbe_regs_maps[i];
		return NULL;
	}

	if (np->data)
		return np->data;

	/* Walk up the path until a cpu or be node is found */
	tmp_np = np;
	do {
		tmp_np = tmp_np->parent;
		/* On a correct device tree we won't walk up to the root */
		BUG_ON(!tmp_np);
	} while (strcasecmp(tmp_np->type, "cpu") &&
		 strcasecmp(tmp_np->type, "be"));

	np->data = cbe_find_map(tmp_np);

	return np->data;
}
struct cbe_pmd_regs __iomem *cbe_get_pmd_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_pmd_regs);

struct cbe_pmd_regs __iomem *cbe_get_cpu_pmd_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->pmd_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_pmd_regs);
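
/*
 * Illustrative sketch, not part of the original file: a caller (e.g. a
 * Cell PMU or thermal driver) would typically look up the per-cpu PMD
 * block and bail out if no map exists for that cpu, roughly like this:
 *
 *	struct cbe_pmd_regs __iomem *pmd_regs;
 *
 *	pmd_regs = cbe_get_cpu_pmd_regs(smp_processor_id());
 *	if (!pmd_regs)
 *		return;		(no register map for this cpu)
 *	... access fields of *pmd_regs with in_be64()/out_be64() ...
 */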
struct cbe_pmd_shadow_regs *cbe_get_pmd_shadow_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_pmd_shadow_regs *cbe_get_cpu_pmd_shadow_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return &map->pmd_shadow_regs;
}

struct cbe_iic_regs __iomem *cbe_get_iic_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_iic_regs __iomem *cbe_get_cpu_iic_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->iic_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_mic_tm_regs(struct device_node *np)
{
	struct cbe_regs_map *map = cbe_find_map(np);
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}

struct cbe_mic_tm_regs __iomem *cbe_get_cpu_mic_tm_regs(int cpu)
{
	struct cbe_regs_map *map = cbe_thread_map[cpu].regs;
	if (map == NULL)
		return NULL;
	return map->mic_tm_regs;
}
EXPORT_SYMBOL_GPL(cbe_get_cpu_mic_tm_regs);
u32 cbe_get_hw_thread_id(int cpu)
{
	return cbe_thread_map[cpu].thread_id;
}
EXPORT_SYMBOL_GPL(cbe_get_hw_thread_id);

u32 cbe_cpu_to_node(int cpu)
{
	return cbe_thread_map[cpu].cbe_id;
}
EXPORT_SYMBOL_GPL(cbe_cpu_to_node);

u32 cbe_node_to_cpu(int node)
{
	return cpumask_first(&cbe_local_mask[node]);
}
EXPORT_SYMBOL_GPL(cbe_node_to_cpu);
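
/*
 * Illustrative sketch, not part of the original file: the topology helpers
 * above are usually combined to run something once per Cell BE chip. The
 * loop below is only an example; cbe_regs_map_count is this file's count
 * of discovered chips.
 *
 *	int node;
 *
 *	for (node = 0; node < cbe_regs_map_count; node++) {
 *		int cpu = cbe_node_to_cpu(node);
 *
 *		pr_info("CBE %d: first cpu %d, hw thread %u\n",
 *			node, cpu, cbe_get_hw_thread_id(cpu));
 *	}
 */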
static struct device_node *cbe_get_be_node(int cpu_id)
{
	struct device_node *np;

	for_each_node_by_type (np, "be") {
		int len, i;
		const phandle *cpu_handle;

		cpu_handle = of_get_property(np, "cpus", &len);

		/*
		 * the CAB SLOF tree is non-compliant, so we just assume
		 * there is only one node
		 */
		if (WARN_ON_ONCE(!cpu_handle))
			return np;

		for (i = 0; i < len; i++)
			if (of_find_node_by_phandle(cpu_handle[i]) ==
			    of_get_cpu_node(cpu_id, NULL))
				return np;
	}

	return NULL;
}
void __init cbe_fill_regs_map(struct cbe_regs_map *map)
{
	if (map->be_node) {
		struct device_node *be, *np;

		be = map->be_node;

		for_each_node_by_type(np, "pervasive")
			if (of_get_parent(np) == be)
				map->pmd_regs = of_iomap(np, 0);

		for_each_node_by_type(np, "CBEA-Internal-Interrupt-Controller")
			if (of_get_parent(np) == be)
				map->iic_regs = of_iomap(np, 2);

		for_each_node_by_type(np, "mic-tm")
			if (of_get_parent(np) == be)
				map->mic_tm_regs = of_iomap(np, 0);
	} else {
		struct device_node *cpu;
		/* That hack must die die die ! */
		const struct address_prop {
			unsigned long address;
			unsigned int len;
		} __attribute__((packed)) *prop;

		cpu = map->cpu_node;

		prop = of_get_property(cpu, "pervasive", NULL);
		if (prop != NULL)
			map->pmd_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "iic", NULL);
		if (prop != NULL)
			map->iic_regs = ioremap(prop->address, prop->len);

		prop = of_get_property(cpu, "mic-tm", NULL);
		if (prop != NULL)
			map->mic_tm_regs = ioremap(prop->address, prop->len);
	}
}
void __init cbe_regs_init(void)
{
	int i;
	unsigned int thread_id;
	struct device_node *cpu;

	/* Build local fast map of CPUs */
	for_each_possible_cpu(i) {
		cbe_thread_map[i].cpu_node = of_get_cpu_node(i, &thread_id);
		cbe_thread_map[i].be_node = cbe_get_be_node(i);
		cbe_thread_map[i].thread_id = thread_id;
	}

	/* Find maps for each device tree CPU */
	for_each_node_by_type(cpu, "cpu") {
		struct cbe_regs_map *map;
		unsigned int cbe_id;

		cbe_id = cbe_regs_map_count++;
		map = &cbe_regs_maps[cbe_id];

		if (cbe_regs_map_count > MAX_CBE) {
			printk(KERN_ERR "cbe_regs: More BE chips than supported!\n");
			cbe_regs_map_count--;
			of_node_put(cpu);
			return;
		}
		map->cpu_node = cpu;

		for_each_possible_cpu(i) {
			struct cbe_thread_map *thread = &cbe_thread_map[i];

			if (thread->cpu_node == cpu) {
				thread->regs = map;
				thread->cbe_id = cbe_id;
				map->be_node = thread->be_node;
				cpumask_set_cpu(i, &cbe_local_mask[cbe_id]);
				if (thread->thread_id == 0)
					cpumask_set_cpu(i, &cbe_first_online_cpu);
			}
		}

		cbe_fill_regs_map(map);
	}
}
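
/*
 * Illustrative note, not part of the original file: cbe_regs_init() is
 * meant to run exactly once, early in platform bring-up, before any of
 * the accessors above are used. The caller name below is hypothetical
 * and only sketches where such a call would sit:
 *
 *	static void __init my_cell_setup_arch(void)
 *	{
 *		cbe_regs_init();	(build the cpu/be -> register maps)
 *		... later init code may call cbe_get_cpu_pmd_regs() etc. ...
 *	}
 */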