cache-uniphier.c

/*
 * Copyright (C) 2015 Masahiro Yamada <yamada.masahiro@socionext.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#define pr_fmt(fmt)		"uniphier: " fmt

#include <linux/init.h>
#include <linux/io.h>
#include <linux/log2.h>
#include <linux/of_address.h>
#include <linux/slab.h>

#include <asm/hardware/cache-uniphier.h>
#include <asm/outercache.h>

/* control registers */
#define UNIPHIER_SSCC		0x0	/* Control Register */
#define    UNIPHIER_SSCC_BST			BIT(20)	/* UCWG burst read */
#define    UNIPHIER_SSCC_ACT			BIT(19)	/* Inst-Data separate */
#define    UNIPHIER_SSCC_WTG			BIT(18)	/* WT gathering on */
#define    UNIPHIER_SSCC_PRD			BIT(17)	/* enable pre-fetch */
#define    UNIPHIER_SSCC_ON			BIT(0)	/* enable cache */
#define UNIPHIER_SSCLPDAWCR	0x30	/* Unified/Data Active Way Control */
#define UNIPHIER_SSCLPIAWCR	0x34	/* Instruction Active Way Control */

/* revision registers */
#define UNIPHIER_SSCID		0x0	/* ID Register */

/* operation registers */
#define UNIPHIER_SSCOPE		0x244	/* Cache Operation Primitive Entry */
#define    UNIPHIER_SSCOPE_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOPE_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOPE_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOPE_CM_SYNC		0x8	/* sync (drain bufs) */
#define    UNIPHIER_SSCOPE_CM_FLUSH_PREFETCH	0x9	/* flush p-fetch buf */
#define UNIPHIER_SSCOQM		0x248	/* Cache Operation Queue Mode */
#define    UNIPHIER_SSCOQM_TID_MASK		(0x3 << 21)
#define    UNIPHIER_SSCOQM_TID_LRU_DATA		(0x0 << 21)
#define    UNIPHIER_SSCOQM_TID_LRU_INST		(0x1 << 21)
#define    UNIPHIER_SSCOQM_TID_WAY		(0x2 << 21)
#define    UNIPHIER_SSCOQM_S_MASK		(0x3 << 17)
#define    UNIPHIER_SSCOQM_S_RANGE		(0x0 << 17)
#define    UNIPHIER_SSCOQM_S_ALL		(0x1 << 17)
#define    UNIPHIER_SSCOQM_S_WAY		(0x2 << 17)
#define    UNIPHIER_SSCOQM_CE			BIT(15)	/* notify completion */
#define    UNIPHIER_SSCOQM_CM_INV		0x0	/* invalidate */
#define    UNIPHIER_SSCOQM_CM_CLEAN		0x1	/* clean */
#define    UNIPHIER_SSCOQM_CM_FLUSH		0x2	/* flush */
#define    UNIPHIER_SSCOQM_CM_PREFETCH		0x3	/* prefetch to cache */
#define    UNIPHIER_SSCOQM_CM_PREFETCH_BUF	0x4	/* prefetch to pf-buf */
#define    UNIPHIER_SSCOQM_CM_TOUCH		0x5	/* touch */
#define    UNIPHIER_SSCOQM_CM_TOUCH_ZERO	0x6	/* touch to zero */
#define    UNIPHIER_SSCOQM_CM_TOUCH_DIRTY	0x7	/* touch with dirty */
#define UNIPHIER_SSCOQAD	0x24c	/* Cache Operation Queue Address */
#define UNIPHIER_SSCOQSZ	0x250	/* Cache Operation Queue Size */
#define UNIPHIER_SSCOQMASK	0x254	/* Cache Operation Queue Address Mask */
#define UNIPHIER_SSCOQWN	0x258	/* Cache Operation Queue Way Number */
#define UNIPHIER_SSCOPPQSEF	0x25c	/* Cache Operation Queue Set Complete */
#define    UNIPHIER_SSCOPPQSEF_FE		BIT(1)
#define    UNIPHIER_SSCOPPQSEF_OE		BIT(0)
#define UNIPHIER_SSCOLPQS	0x260	/* Cache Operation Queue Status */
#define    UNIPHIER_SSCOLPQS_EF			BIT(2)
#define    UNIPHIER_SSCOLPQS_EST		BIT(1)
#define    UNIPHIER_SSCOLPQS_QST		BIT(0)

/* Is the touch/pre-fetch destination specified by ways? */
#define UNIPHIER_SSCOQM_TID_IS_WAY(op) \
		((op & UNIPHIER_SSCOQM_TID_MASK) == UNIPHIER_SSCOQM_TID_WAY)

/* Is the operation region specified by address range? */
#define UNIPHIER_SSCOQM_S_IS_RANGE(op) \
		((op & UNIPHIER_SSCOQM_S_MASK) == UNIPHIER_SSCOQM_S_RANGE)

/**
 * uniphier_cache_data - UniPhier outer cache specific data
 *
 * @ctrl_base: virtual base address of control registers
 * @rev_base: virtual base address of revision registers
 * @op_base: virtual base address of operation registers
 * @way_present_mask: each bit specifies if the way is present
 * @way_locked_mask: each bit specifies if the way is locked
 * @nsets: number of associativity sets
 * @line_size: line size in bytes
 * @range_op_max_size: max size that can be handled by a single range operation
 * @list: list node to include this level in the whole cache hierarchy
 */
struct uniphier_cache_data {
	void __iomem *ctrl_base;
	void __iomem *rev_base;
	void __iomem *op_base;
	u32 way_present_mask;
	u32 way_locked_mask;
	u32 nsets;
	u32 line_size;
	u32 range_op_max_size;
	struct list_head list;
};

/*
 * List of the whole outer cache hierarchy.  This list is only modified during
 * the early boot stage, so no mutex is needed to access it.
 */
static LIST_HEAD(uniphier_cache_list);

/**
 * __uniphier_cache_sync - perform a sync point for a particular cache level
 *
 * @data: cache controller specific data
 */
static void __uniphier_cache_sync(struct uniphier_cache_data *data)
{
	/* This sequence need not be atomic.  Do not disable IRQ. */
	writel_relaxed(UNIPHIER_SSCOPE_CM_SYNC,
		       data->op_base + UNIPHIER_SSCOPE);

	/* need a read back to confirm */
	readl_relaxed(data->op_base + UNIPHIER_SSCOPE);
}

/**
 * __uniphier_cache_maint_common - run a queue operation for a particular level
 *
 * @data: cache controller specific data
 * @start: start address of range operation (don't care for "all" operation)
 * @size: data size of range operation (don't care for "all" operation)
 * @operation: flags to specify the desired cache operation
 */
static void __uniphier_cache_maint_common(struct uniphier_cache_data *data,
					  unsigned long start,
					  unsigned long size,
					  u32 operation)
{
	unsigned long flags;

	/*
	 * No spin lock is necessary here because:
	 *
	 * [1] This outer cache controller is able to accept maintenance
	 * operations from multiple CPUs at a time in an SMP system; if a
	 * maintenance operation is under way and another operation is issued,
	 * the new one is stored in the queue.  The controller performs one
	 * operation after another.  If the queue is full, the status register,
	 * UNIPHIER_SSCOPPQSEF, indicates that the queue registration has
	 * failed.  The status registers, UNIPHIER_{SSCOPPQSEF, SSCOLPQS}, have
	 * different instances for each CPU, i.e. each CPU can track the status
	 * of the maintenance operations triggered by itself.
	 *
	 * [2] The cache command registers, UNIPHIER_{SSCOQM, SSCOQAD, SSCOQSZ,
	 * SSCOQWN}, are shared between multiple CPUs, but the hardware still
	 * guarantees that the registration sequence is atomic; write accesses
	 * to them are arbitrated by the hardware.  The first accessor to the
	 * register, UNIPHIER_SSCOQM, holds the access right, and it is
	 * released by reading the status register, UNIPHIER_SSCOPPQSEF.
	 * While one CPU is holding the access right, other CPUs fail to
	 * register operations.  One CPU should not hold the access right for
	 * a long time, so local IRQs should be disabled during the following
	 * sequence.
	 */
	local_irq_save(flags);

	/* clear the complete notification flag */
	writel_relaxed(UNIPHIER_SSCOLPQS_EF, data->op_base + UNIPHIER_SSCOLPQS);

	do {
		/* set cache operation */
		writel_relaxed(UNIPHIER_SSCOQM_CE | operation,
			       data->op_base + UNIPHIER_SSCOQM);

		/* set address range if needed */
		if (likely(UNIPHIER_SSCOQM_S_IS_RANGE(operation))) {
			writel_relaxed(start, data->op_base + UNIPHIER_SSCOQAD);
			writel_relaxed(size, data->op_base + UNIPHIER_SSCOQSZ);
		}

		/* set target ways if needed */
		if (unlikely(UNIPHIER_SSCOQM_TID_IS_WAY(operation)))
			writel_relaxed(data->way_locked_mask,
				       data->op_base + UNIPHIER_SSCOQWN);
	} while (unlikely(readl_relaxed(data->op_base + UNIPHIER_SSCOPPQSEF) &
			  (UNIPHIER_SSCOPPQSEF_FE | UNIPHIER_SSCOPPQSEF_OE)));

	/* wait until the operation is completed */
	while (likely(readl_relaxed(data->op_base + UNIPHIER_SSCOLPQS) !=
		      UNIPHIER_SSCOLPQS_EF))
		cpu_relax();

	local_irq_restore(flags);
}

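/**
 * __uniphier_cache_maint_all - run a maintenance operation on the whole cache
 *
 * @data: cache controller specific data
 * @operation: flags to specify the desired cache operation
 */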
static void __uniphier_cache_maint_all(struct uniphier_cache_data *data,
				       u32 operation)
{
	__uniphier_cache_maint_common(data, 0, 0,
				      UNIPHIER_SSCOQM_S_ALL | operation);

	__uniphier_cache_sync(data);
}

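/**
 * __uniphier_cache_maint_range - run a maintenance operation on a range
 *
 * @data: cache controller specific data
 * @start: start address of the range (inclusive)
 * @end: end address of the range (exclusive)
 * @operation: flags to specify the desired cache operation
 *
 * The range is aligned to whole cache lines and processed in chunks of at
 * most data->range_op_max_size bytes.  A range that (almost) spans the whole
 * address space is handled as an "all" operation instead.
 */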
static void __uniphier_cache_maint_range(struct uniphier_cache_data *data,
					 unsigned long start, unsigned long end,
					 u32 operation)
{
	unsigned long size;

	/*
	 * If the start address is not aligned,
	 * perform a cache operation for the first cache-line
	 */
	start = start & ~(data->line_size - 1);

	size = end - start;

	if (unlikely(size >= (unsigned long)(-data->line_size))) {
		/* this means cache operation for all range */
		__uniphier_cache_maint_all(data, operation);
		return;
	}

	/*
	 * If the end address is not aligned,
	 * perform a cache operation for the last cache-line
	 */
	size = ALIGN(size, data->line_size);

	while (size) {
		unsigned long chunk_size = min_t(unsigned long, size,
						 data->range_op_max_size);

		__uniphier_cache_maint_common(data, start, chunk_size,
					      UNIPHIER_SSCOQM_S_RANGE | operation);

		start += chunk_size;
		size -= chunk_size;
	}

	__uniphier_cache_sync(data);
}

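/**
 * __uniphier_cache_enable - switch a cache level on or off
 *
 * @data: cache controller specific data
 * @on: true to enable the cache with WT gathering and pre-fetch,
 *      false to disable it
 */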
static void __uniphier_cache_enable(struct uniphier_cache_data *data, bool on)
{
	u32 val = 0;

	if (on)
		val = UNIPHIER_SSCC_WTG | UNIPHIER_SSCC_PRD | UNIPHIER_SSCC_ON;

	writel_relaxed(val, data->ctrl_base + UNIPHIER_SSCC);
}

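/**
 * __uniphier_cache_set_locked_ways - lock ways against normal allocation
 *
 * @data: cache controller specific data
 * @way_mask: bit mask of the ways to lock (clamped to the ways present)
 *
 * The remaining (unlocked) ways are made active for unified/data allocation
 * via UNIPHIER_SSCLPDAWCR.
 */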
static void __init __uniphier_cache_set_locked_ways(
					struct uniphier_cache_data *data,
					u32 way_mask)
{
	data->way_locked_mask = way_mask & data->way_present_mask;

	writel_relaxed(~data->way_locked_mask & data->way_present_mask,
		       data->ctrl_base + UNIPHIER_SSCLPDAWCR);
}

static void uniphier_cache_maint_range(unsigned long start, unsigned long end,
				       u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_range(data, start, end, operation);
}

static void uniphier_cache_maint_all(u32 operation)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_maint_all(data, operation);
}

static void uniphier_cache_inv_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_clean_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_CLEAN);
}

static void uniphier_cache_flush_range(unsigned long start, unsigned long end)
{
	uniphier_cache_maint_range(start, end, UNIPHIER_SSCOQM_CM_FLUSH);
}

static void __init uniphier_cache_inv_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_INV);
}

static void uniphier_cache_flush_all(void)
{
	uniphier_cache_maint_all(UNIPHIER_SSCOQM_CM_FLUSH);
}

static void uniphier_cache_disable(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry_reverse(data, &uniphier_cache_list, list)
		__uniphier_cache_enable(data, false);

	uniphier_cache_flush_all();
}

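/*
 * Invalidate all levels, then enable each level in order with no ways
 * locked.
 */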
static void __init uniphier_cache_enable(void)
{
	struct uniphier_cache_data *data;

	uniphier_cache_inv_all();

	list_for_each_entry(data, &uniphier_cache_list, list) {
		__uniphier_cache_enable(data, true);
		__uniphier_cache_set_locked_ways(data, 0);
	}
}

static void uniphier_cache_sync(void)
{
	struct uniphier_cache_data *data;

	list_for_each_entry(data, &uniphier_cache_list, list)
		__uniphier_cache_sync(data);
}

int __init uniphier_cache_l2_is_enabled(void)
{
	struct uniphier_cache_data *data;

	data = list_first_entry_or_null(&uniphier_cache_list,
					struct uniphier_cache_data, list);
	if (!data)
		return 0;

	return !!(readl_relaxed(data->ctrl_base + UNIPHIER_SSCC) &
		  UNIPHIER_SSCC_ON);
}

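/**
 * uniphier_cache_l2_touch_range - pre-load an address range into the L2
 *
 * @start: start address of the range
 * @end: end address of the range
 *
 * The range is "touched" (fetched into the cache) using the ways currently
 * selected by way_locked_mask.  The L2 is the first entry of
 * uniphier_cache_list.
 */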
void __init uniphier_cache_l2_touch_range(unsigned long start,
					  unsigned long end)
{
	struct uniphier_cache_data *data;

	data = list_first_entry_or_null(&uniphier_cache_list,
					struct uniphier_cache_data, list);
	if (data)
		__uniphier_cache_maint_range(data, start, end,
					     UNIPHIER_SSCOQM_TID_WAY |
					     UNIPHIER_SSCOQM_CM_TOUCH);
}

void __init uniphier_cache_l2_set_locked_ways(u32 way_mask)
{
	struct uniphier_cache_data *data;

	data = list_first_entry_or_null(&uniphier_cache_list,
					struct uniphier_cache_data, list);
	if (data)
		__uniphier_cache_set_locked_ways(data, way_mask);
}

static const struct of_device_id uniphier_cache_match[] __initconst = {
	{
		.compatible = "socionext,uniphier-system-cache",
	},
	{ /* sentinel */ }
};

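/**
 * uniphier_cache_get_next_level_node - find the next-level cache node
 *
 * @np: device node of the current cache level
 *
 * Follow the "next-level-cache" phandle and return the referenced device
 * node (with its reference count raised), or NULL if the property is absent.
 */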
static struct device_node * __init uniphier_cache_get_next_level_node(
							struct device_node *np)
{
	u32 phandle;

	if (of_property_read_u32(np, "next-level-cache", &phandle))
		return NULL;

	return of_find_node_by_phandle(phandle);
}

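/**
 * __uniphier_cache_init - initialize one cache level and recurse downwards
 *
 * @np: device node of this cache level
 * @cache_level: in/out; the level expected on entry, the deepest level
 *               reached on return
 *
 * Parse the DT properties, map the control/revision/operation registers and
 * add this level to uniphier_cache_list, then try to initialize the next
 * level if one is referenced.  A failure at a deeper level is reported to
 * the caller, but this level is not rolled back.
 */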
static int __init __uniphier_cache_init(struct device_node *np,
					unsigned int *cache_level)
{
	struct uniphier_cache_data *data;
	u32 level, cache_size;
	struct device_node *next_np;
	int ret = 0;

	if (!of_match_node(uniphier_cache_match, np)) {
		pr_err("L%d: not compatible with uniphier cache\n",
		       *cache_level);
		return -EINVAL;
	}

	if (of_property_read_u32(np, "cache-level", &level)) {
		pr_err("L%d: cache-level is not specified\n", *cache_level);
		return -EINVAL;
	}

	if (level != *cache_level) {
		pr_err("L%d: cache-level is unexpected value %d\n",
		       *cache_level, level);
		return -EINVAL;
	}

	if (!of_property_read_bool(np, "cache-unified")) {
		pr_err("L%d: cache-unified is not specified\n", *cache_level);
		return -EINVAL;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (of_property_read_u32(np, "cache-line-size", &data->line_size) ||
	    !is_power_of_2(data->line_size)) {
		pr_err("L%d: cache-line-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-sets", &data->nsets) ||
	    !is_power_of_2(data->nsets)) {
		pr_err("L%d: cache-sets is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

	if (of_property_read_u32(np, "cache-size", &cache_size) ||
	    cache_size == 0 || cache_size % (data->nsets * data->line_size)) {
		pr_err("L%d: cache-size is unspecified or invalid\n",
		       *cache_level);
		ret = -EINVAL;
		goto err;
	}

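	/*
	 * Number of ways = cache-size / cache-sets / cache-line-size.
	 * Illustrative numbers only: a 1 MiB cache with 512 sets and
	 * 128-byte lines has 1048576 / 512 / 128 = 16 ways, so
	 * way_present_mask becomes 0xffff.
	 */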
	data->way_present_mask =
		((u32)1 << cache_size / data->nsets / data->line_size) - 1;

	data->ctrl_base = of_iomap(np, 0);
	if (!data->ctrl_base) {
		pr_err("L%d: failed to map control register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->rev_base = of_iomap(np, 1);
	if (!data->rev_base) {
		pr_err("L%d: failed to map revision register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	data->op_base = of_iomap(np, 2);
	if (!data->op_base) {
		pr_err("L%d: failed to map operation register\n", *cache_level);
		ret = -ENOMEM;
		goto err;
	}

	if (*cache_level == 2) {
		u32 revision = readl(data->rev_base + UNIPHIER_SSCID);
		/*
		 * The size of range operation is limited to (1 << 22) or less
		 * for PH-sLD8 or older SoCs.
		 */
		if (revision <= 0x16)
			data->range_op_max_size = (u32)1 << 22;
	}

	data->range_op_max_size -= data->line_size;

	INIT_LIST_HEAD(&data->list);
	list_add_tail(&data->list, &uniphier_cache_list); /* no mutex */

	/*
	 * OK, this level has been successfully initialized.  Look for the next
	 * level cache.  Do not roll back even if the initialization of the
	 * next level cache fails because we want to continue with available
	 * cache levels.
	 */
	next_np = uniphier_cache_get_next_level_node(np);
	if (next_np) {
		(*cache_level)++;
		ret = __uniphier_cache_init(next_np, cache_level);
	}
	of_node_put(next_np);

	return ret;

err:
	iounmap(data->op_base);
	iounmap(data->rev_base);
	iounmap(data->ctrl_base);
	kfree(data);

	return ret;
}

int __init uniphier_cache_init(void)
{
	struct device_node *np = NULL;
	unsigned int cache_level;
	int ret = 0;

	/* look for level 2 cache */
	while ((np = of_find_matching_node(np, uniphier_cache_match)))
		if (!of_property_read_u32(np, "cache-level", &cache_level) &&
		    cache_level == 2)
			break;

	if (!np)
		return -ENODEV;

	ret = __uniphier_cache_init(np, &cache_level);
	of_node_put(np);
	if (ret) {
		/*
		 * Error out only if the L2 initialization fails.  Continue
		 * despite any error on L3 or outer levels because they are
		 * optional.
		 */
		if (cache_level == 2) {
			pr_err("failed to initialize L2 cache\n");
			return ret;
		}

		cache_level--;
		ret = 0;
	}

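	/*
	 * Hook into the ARM outer cache framework; these callbacks are
	 * reached through helpers such as outer_flush_range() and
	 * outer_sync() in the core code.
	 */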
	outer_cache.inv_range = uniphier_cache_inv_range;
	outer_cache.clean_range = uniphier_cache_clean_range;
	outer_cache.flush_range = uniphier_cache_flush_range;
	outer_cache.flush_all = uniphier_cache_flush_all;
	outer_cache.disable = uniphier_cache_disable;
	outer_cache.sync = uniphier_cache_sync;

	uniphier_cache_enable();

	pr_info("enabled outer cache (cache level: %d)\n", cache_level);

	return ret;
}