sc-mips.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256
/*
 * Copyright (C) 2006 Chris Dearman (chris@mips.com)
 */
  4. #include <linux/init.h>
  5. #include <linux/kernel.h>
  6. #include <linux/sched.h>
  7. #include <linux/mm.h>
  8. #include <asm/cpu-type.h>
  9. #include <asm/mipsregs.h>
  10. #include <asm/bcache.h>
  11. #include <asm/cacheops.h>
  12. #include <asm/page.h>
  13. #include <asm/pgtable.h>
  14. #include <asm/mmu_context.h>
  15. #include <asm/r4kcache.h>
  16. #include <asm/mips-cm.h>
/*
 * MIPS32/MIPS64 L2 cache handling
 */
  20. /*
  21. * Writeback and invalidate the secondary cache before DMA.
  22. */
  23. static void mips_sc_wback_inv(unsigned long addr, unsigned long size)
  24. {
  25. blast_scache_range(addr, addr + size);
  26. }
  27. /*
  28. * Invalidate the secondary cache before DMA.
  29. */
  30. static void mips_sc_inv(unsigned long addr, unsigned long size)
  31. {
  32. unsigned long lsize = cpu_scache_line_size();
  33. unsigned long almask = ~(lsize - 1);
  34. cache_op(Hit_Writeback_Inv_SD, addr & almask);
  35. cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask);
  36. blast_inv_scache_range(addr, addr + size);
  37. }
/*
 * No-op: on these systems the L2 cache is permanently enabled by
 * hardware, so there is nothing to switch on.
 */
static void mips_sc_enable(void)
{
	/* L2 cache is permanently enabled */
}
/*
 * No-op: the L2 cache is permanently enabled and cannot be switched
 * off from software here.
 */
static void mips_sc_disable(void)
{
	/* L2 cache is permanently enabled */
}
  46. static void mips_sc_prefetch_enable(void)
  47. {
  48. unsigned long pftctl;
  49. if (mips_cm_revision() < CM_REV_CM2_5)
  50. return;
  51. /*
  52. * If there is one or more L2 prefetch unit present then enable
  53. * prefetching for both code & data, for all ports.
  54. */
  55. pftctl = read_gcr_l2_pft_control();
  56. if (pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK) {
  57. pftctl &= ~CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
  58. pftctl |= PAGE_MASK & CM_GCR_L2_PFT_CONTROL_PAGEMASK_MSK;
  59. pftctl |= CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
  60. write_gcr_l2_pft_control(pftctl);
  61. pftctl = read_gcr_l2_pft_control_b();
  62. pftctl |= CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
  63. pftctl |= CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
  64. write_gcr_l2_pft_control_b(pftctl);
  65. }
  66. }
  67. static void mips_sc_prefetch_disable(void)
  68. {
  69. unsigned long pftctl;
  70. if (mips_cm_revision() < CM_REV_CM2_5)
  71. return;
  72. pftctl = read_gcr_l2_pft_control();
  73. pftctl &= ~CM_GCR_L2_PFT_CONTROL_PFTEN_MSK;
  74. write_gcr_l2_pft_control(pftctl);
  75. pftctl = read_gcr_l2_pft_control_b();
  76. pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_PORTID_MSK;
  77. pftctl &= ~CM_GCR_L2_PFT_CONTROL_B_CEN_MSK;
  78. write_gcr_l2_pft_control_b(pftctl);
  79. }
  80. static bool mips_sc_prefetch_is_enabled(void)
  81. {
  82. unsigned long pftctl;
  83. if (mips_cm_revision() < CM_REV_CM2_5)
  84. return false;
  85. pftctl = read_gcr_l2_pft_control();
  86. if (!(pftctl & CM_GCR_L2_PFT_CONTROL_NPFT_MSK))
  87. return false;
  88. return !!(pftctl & CM_GCR_L2_PFT_CONTROL_PFTEN_MSK);
  89. }
  90. static struct bcache_ops mips_sc_ops = {
  91. .bc_enable = mips_sc_enable,
  92. .bc_disable = mips_sc_disable,
  93. .bc_wback_inv = mips_sc_wback_inv,
  94. .bc_inv = mips_sc_inv,
  95. .bc_prefetch_enable = mips_sc_prefetch_enable,
  96. .bc_prefetch_disable = mips_sc_prefetch_disable,
  97. .bc_prefetch_is_enabled = mips_sc_prefetch_is_enabled,
  98. };
/*
 * Check if the L2 cache controller is activated on a particular platform.
 * MTI's L2 controller and the L2 cache controller of Broadcom's BMIPS
 * cores both use c0_config2's bit 12 as the "L2 Bypass" bit, i.e. the
 * cache being disabled.  However there is no guarantee that this holds
 * on all platforms: the architecture spec leaves bits 12..15
 * implementation defined, so this function will eventually have to be
 * replaced by a platform-specific probe.
 */
  108. static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
  109. {
  110. unsigned int config2 = read_c0_config2();
  111. unsigned int tmp;
  112. /* Check the bypass bit (L2B) */
  113. switch (current_cpu_type()) {
  114. case CPU_34K:
  115. case CPU_74K:
  116. case CPU_1004K:
  117. case CPU_1074K:
  118. case CPU_INTERAPTIV:
  119. case CPU_PROAPTIV:
  120. case CPU_P5600:
  121. case CPU_BMIPS5000:
  122. case CPU_QEMU_GENERIC:
  123. if (config2 & (1 << 12))
  124. return 0;
  125. }
  126. tmp = (config2 >> 4) & 0x0f;
  127. if (0 < tmp && tmp <= 7)
  128. c->scache.linesz = 2 << tmp;
  129. else
  130. return 0;
  131. return 1;
  132. }
  133. static int __init mips_sc_probe_cm3(void)
  134. {
  135. struct cpuinfo_mips *c = &current_cpu_data;
  136. unsigned long cfg = read_gcr_l2_config();
  137. unsigned long sets, line_sz, assoc;
  138. if (cfg & CM_GCR_L2_CONFIG_BYPASS_MSK)
  139. return 0;
  140. sets = cfg & CM_GCR_L2_CONFIG_SET_SIZE_MSK;
  141. sets >>= CM_GCR_L2_CONFIG_SET_SIZE_SHF;
  142. if (sets)
  143. c->scache.sets = 64 << sets;
  144. line_sz = cfg & CM_GCR_L2_CONFIG_LINE_SIZE_MSK;
  145. line_sz >>= CM_GCR_L2_CONFIG_LINE_SIZE_SHF;
  146. if (line_sz)
  147. c->scache.linesz = 2 << line_sz;
  148. assoc = cfg & CM_GCR_L2_CONFIG_ASSOC_MSK;
  149. assoc >>= CM_GCR_L2_CONFIG_ASSOC_SHF;
  150. c->scache.ways = assoc + 1;
  151. c->scache.waysize = c->scache.sets * c->scache.linesz;
  152. c->scache.waybit = __ffs(c->scache.waysize);
  153. if (c->scache.linesz) {
  154. c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
  155. return 1;
  156. }
  157. return 0;
  158. }
/*
 * Weak default hook: platforms that must configure something (e.g. the
 * interface to the L2 controller) before the L2 is probed override this.
 * The default does nothing.
 */
void __weak platform_early_l2_init(void)
{
}
  162. static inline int __init mips_sc_probe(void)
  163. {
  164. struct cpuinfo_mips *c = &current_cpu_data;
  165. unsigned int config1, config2;
  166. unsigned int tmp;
  167. /* Mark as not present until probe completed */
  168. c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
  169. /*
  170. * Do we need some platform specific probing before
  171. * we configure L2?
  172. */
  173. platform_early_l2_init();
  174. if (mips_cm_revision() >= CM_REV_CM3)
  175. return mips_sc_probe_cm3();
  176. /* Ignore anything but MIPSxx processors */
  177. if (!(c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M32R2 |
  178. MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R1 |
  179. MIPS_CPU_ISA_M64R2 | MIPS_CPU_ISA_M64R6)))
  180. return 0;
  181. /* Does this MIPS32/MIPS64 CPU have a config2 register? */
  182. config1 = read_c0_config1();
  183. if (!(config1 & MIPS_CONF_M))
  184. return 0;
  185. config2 = read_c0_config2();
  186. if (!mips_sc_is_activated(c))
  187. return 0;
  188. tmp = (config2 >> 8) & 0x0f;
  189. if (tmp <= 7)
  190. c->scache.sets = 64 << tmp;
  191. else
  192. return 0;
  193. tmp = (config2 >> 0) & 0x0f;
  194. if (tmp <= 7)
  195. c->scache.ways = tmp + 1;
  196. else
  197. return 0;
  198. c->scache.waysize = c->scache.sets * c->scache.linesz;
  199. c->scache.waybit = __ffs(c->scache.waysize);
  200. c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
  201. return 1;
  202. }
  203. int mips_sc_init(void)
  204. {
  205. int found = mips_sc_probe();
  206. if (found) {
  207. mips_sc_enable();
  208. mips_sc_prefetch_enable();
  209. bcops = &mips_sc_ops;
  210. }
  211. return found;
  212. }