homecache.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * This code maintains the "home" for each page in the system.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/bootmem.h>
#include <linux/rmap.h>
#include <linux/pagemap.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/pagevec.h>
#include <linux/ptrace.h>
#include <linux/timex.h>
#include <linux/cache.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>

#include <asm/page.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/homecache.h>

#include <arch/sim.h>

#include "migrate.h"

/*
 * The noallocl2 option suppresses all use of the L2 cache to cache
 * locally from a remote home.
 */
static int __write_once noallocl2;
static int __init set_noallocl2(char *str)
{
        noallocl2 = 1;
        return 0;
}
early_param("noallocl2", set_noallocl2);
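
/*
 * Illustrative note (not part of the original file): since this is an
 * early_param, the option is driven from the kernel boot command line,
 * e.g. booting with
 *
 *      console=... noallocl2
 *
 * makes pte_set_home() below mark every PTE it produces with the
 * no-alloc-L2 attribute, so remote-homed data is never cached in the
 * local L2.
 */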

/*
 * Update the irq_stat for cpus that we are going to interrupt
 * with TLB or cache flushes.  Also handle removing dataplane cpus
 * from the TLB flush set, and setting dataplane_tlb_state instead.
 */
static void hv_flush_update(const struct cpumask *cache_cpumask,
                            struct cpumask *tlb_cpumask,
                            unsigned long tlb_va, unsigned long tlb_length,
                            HV_Remote_ASID *asids, int asidcount)
{
        struct cpumask mask;
        int i, cpu;

        cpumask_clear(&mask);
        if (cache_cpumask)
                cpumask_or(&mask, &mask, cache_cpumask);
        if (tlb_cpumask && tlb_length)
                cpumask_or(&mask, &mask, tlb_cpumask);
        for (i = 0; i < asidcount; ++i)
                cpumask_set_cpu(asids[i].y * smp_width + asids[i].x, &mask);

        /*
         * Don't bother to update atomically; losing a count
         * here is not that critical.
         */
        for_each_cpu(cpu, &mask)
                ++per_cpu(irq_stat, cpu).irq_hv_flush_count;
}

/*
 * This wrapper function around hv_flush_remote() does several things:
 *
 *  - Provides a return value error-checking panic path, since
 *    there's never any good reason for hv_flush_remote() to fail.
 *  - Accepts a 32-bit PFN rather than a 64-bit PA, which generally
 *    is the type that Linux wants to pass around anyway.
 *  - Canonicalizes that lengths of zero make cpumasks NULL.
 *  - Handles deferring TLB flushes for dataplane tiles.
 *  - Tracks remote interrupts in the per-cpu irq_cpustat_t.
 *
 * Note that we have to wait until the cache flush completes before
 * updating the per-cpu last_cache_flush word, since otherwise another
 * concurrent flush can race, conclude the flush has already
 * completed, and start to use the page while it's still dirty
 * remotely (running concurrently with the actual evict, presumably).
 */
void flush_remote(unsigned long cache_pfn, unsigned long cache_control,
                  const struct cpumask *cache_cpumask_orig,
                  HV_VirtAddr tlb_va, unsigned long tlb_length,
                  unsigned long tlb_pgsize,
                  const struct cpumask *tlb_cpumask_orig,
                  HV_Remote_ASID *asids, int asidcount)
{
        int rc;
        struct cpumask cache_cpumask_copy, tlb_cpumask_copy;
        struct cpumask *cache_cpumask, *tlb_cpumask;
        HV_PhysAddr cache_pa;

        mb();   /* provided just to simplify "magic hypervisor" mode */

        /*
         * Canonicalize and copy the cpumasks.
         */
        if (cache_cpumask_orig && cache_control) {
                cpumask_copy(&cache_cpumask_copy, cache_cpumask_orig);
                cache_cpumask = &cache_cpumask_copy;
        } else {
                cpumask_clear(&cache_cpumask_copy);
                cache_cpumask = NULL;
        }
        if (cache_cpumask == NULL)
                cache_control = 0;
        if (tlb_cpumask_orig && tlb_length) {
                cpumask_copy(&tlb_cpumask_copy, tlb_cpumask_orig);
                tlb_cpumask = &tlb_cpumask_copy;
        } else {
                cpumask_clear(&tlb_cpumask_copy);
                tlb_cpumask = NULL;
        }

        hv_flush_update(cache_cpumask, tlb_cpumask, tlb_va, tlb_length,
                        asids, asidcount);
        cache_pa = (HV_PhysAddr)cache_pfn << PAGE_SHIFT;
        rc = hv_flush_remote(cache_pa, cache_control,
                             cpumask_bits(cache_cpumask),
                             tlb_va, tlb_length, tlb_pgsize,
                             cpumask_bits(tlb_cpumask),
                             asids, asidcount);
        if (rc == 0)
                return;

        pr_err("hv_flush_remote(%#llx, %#lx, %p [%*pb], %#lx, %#lx, %#lx, %p [%*pb], %p, %d) = %d\n",
               cache_pa, cache_control, cache_cpumask,
               cpumask_pr_args(&cache_cpumask_copy),
               (unsigned long)tlb_va, tlb_length, tlb_pgsize, tlb_cpumask,
               cpumask_pr_args(&tlb_cpumask_copy), asids, asidcount, rc);
        panic("Unsafe to continue.");
}
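
/*
 * Usage sketch (illustrative only, not from the original file).  A
 * pure cache evict passes zeroes for all the TLB arguments, exactly
 * as homecache_evict() below does, while a pure TLB flush passes
 * zeroes for the cache arguments:
 *
 *      evict the L2 on every cacheable cpu:
 *      flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
 *                   0, 0, 0, NULL, NULL, 0);
 *
 *      flush the TLB entry mapping a small page at "va" on the
 *      online cpus (a hypothetical caller's va):
 *      flush_remote(0, 0, NULL, va, PAGE_SIZE, PAGE_SIZE,
 *                   cpu_online_mask, NULL, 0);
 *
 * homecache_change_page_home() below combines both in a single call.
 */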

/* Flush and invalidate one page of "va" from the cache at "home". */
static void homecache_finv_page_va(void *va, int home)
{
        int cpu = get_cpu();
        if (home == cpu) {
                finv_buffer_local(va, PAGE_SIZE);
        } else if (home == PAGE_HOME_HASH) {
                finv_buffer_remote(va, PAGE_SIZE, 1);
        } else {
                BUG_ON(home < 0 || home >= NR_CPUS);
                finv_buffer_remote(va, PAGE_SIZE, 0);
        }
        put_cpu();
}

/*
 * Flush and invalidate a page via a temporary per-cpu fixmap mapping
 * built with the requested home, so the flush works even for pages
 * with no suitable permanent kernel mapping.
 */
void homecache_finv_map_page(struct page *page, int home)
{
        unsigned long flags;
        unsigned long va;
        pte_t *ptep;
        pte_t pte;

        if (home == PAGE_HOME_UNCACHED)
                return;
        local_irq_save(flags);
#ifdef CONFIG_HIGHMEM
        va = __fix_to_virt(FIX_KMAP_BEGIN + kmap_atomic_idx_push() +
                           (KM_TYPE_NR * smp_processor_id()));
#else
        va = __fix_to_virt(FIX_HOMECACHE_BEGIN + smp_processor_id());
#endif
        ptep = virt_to_kpte(va);
        pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
        __set_pte(ptep, pte_set_home(pte, home));
        homecache_finv_page_va((void *)va, home);
        __pte_clear(ptep);
        hv_flush_page(va, PAGE_SIZE);
#ifdef CONFIG_HIGHMEM
        kmap_atomic_idx_pop();
#endif
        local_irq_restore(flags);
}

/* Use the permanent kernel mapping if it exists and has the right home. */
static void homecache_finv_page_home(struct page *page, int home)
{
        if (!PageHighMem(page) && home == page_home(page))
                homecache_finv_page_va(page_address(page), home);
        else
                homecache_finv_map_page(page, home);
}

static inline bool incoherent_home(int home)
{
        return home == PAGE_HOME_IMMUTABLE || home == PAGE_HOME_INCOHERENT;
}

/*
 * A page with an incoherent home may have lines cached on any cpu, so
 * flush it on every cacheable cpu; otherwise one flush at its home
 * (or via a forced temporary mapping) is enough.
 */
static void homecache_finv_page_internal(struct page *page, int force_map)
{
        int home = page_home(page);
        if (home == PAGE_HOME_UNCACHED)
                return;
        if (incoherent_home(home)) {
                int cpu;
                for_each_cpu(cpu, &cpu_cacheable_map)
                        homecache_finv_map_page(page, cpu);
        } else if (force_map) {
                /* Force if, e.g., the normal mapping is migrating. */
                homecache_finv_map_page(page, home);
        } else {
                homecache_finv_page_home(page, home);
        }
        sim_validate_lines_evicted(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
}

void homecache_finv_page(struct page *page)
{
        homecache_finv_page_internal(page, 0);
}

/* Evict any lines cached in the L2 caches of the given set of cpus. */
void homecache_evict(const struct cpumask *mask)
{
        flush_remote(0, HV_FLUSH_EVICT_L2, mask, 0, 0, 0, NULL, NULL, 0);
}
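
/*
 * Illustrative usage (not from the original file): a hypothetical
 * driver flushing a page it just DMA'ed into before handing it out,
 * and a hotplug-style path evicting one cpu's whole L2:
 *
 *      homecache_finv_page(page);
 *      homecache_evict(cpumask_of(cpu));
 */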

/* Report the home corresponding to a given PTE. */
static int pte_to_home(pte_t pte)
{
        if (hv_pte_get_nc(pte))
                return PAGE_HOME_IMMUTABLE;
        switch (hv_pte_get_mode(pte)) {
        case HV_PTE_MODE_CACHE_TILE_L3:
                return get_remote_cache_cpu(pte);
        case HV_PTE_MODE_CACHE_NO_L3:
                return PAGE_HOME_INCOHERENT;
        case HV_PTE_MODE_UNCACHED:
                return PAGE_HOME_UNCACHED;
        case HV_PTE_MODE_CACHE_HASH_L3:
                return PAGE_HOME_HASH;
        }
        panic("Bad PTE %#llx\n", pte.val);
}

/* Update the home of a PTE if necessary (can also be used for a pgprot_t). */
pte_t pte_set_home(pte_t pte, int home)
{
#if CHIP_HAS_MMIO()
        /* Check for MMIO mappings and pass them through. */
        if (hv_pte_get_mode(pte) == HV_PTE_MODE_MMIO)
                return pte;
#endif

        /*
         * Only immutable pages get NC mappings.  If we have a
         * non-coherent PTE, but the underlying page is not
         * immutable, it's likely the result of a forced
         * caching setting running up against ptrace setting
         * the page to be writable underneath.  In this case,
         * just keep the PTE coherent.
         */
        if (hv_pte_get_nc(pte) && home != PAGE_HOME_IMMUTABLE) {
                pte = hv_pte_clear_nc(pte);
                pr_err("non-immutable page incoherently referenced: %#llx\n",
                       pte.val);
        }

        switch (home) {

        case PAGE_HOME_UNCACHED:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
                break;

        case PAGE_HOME_INCOHERENT:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
                break;

        case PAGE_HOME_IMMUTABLE:
                /*
                 * We could home this page anywhere, since it's immutable,
                 * but by default just home it to follow "hash_default".
                 */
                BUG_ON(hv_pte_get_writable(pte));
                if (pte_get_forcecache(pte)) {
                        /* Upgrade "force any cpu" to "No L3" for immutable. */
                        if (hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_TILE_L3
                            && pte_get_anyhome(pte)) {
                                pte = hv_pte_set_mode(pte,
                                                      HV_PTE_MODE_CACHE_NO_L3);
                        }
                } else if (hash_default) {
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
                } else {
                        pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
                }
                pte = hv_pte_set_nc(pte);
                break;

        case PAGE_HOME_HASH:
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_HASH_L3);
                break;

        default:
                BUG_ON(home < 0 || home >= NR_CPUS ||
                       !cpu_is_valid_lotar(home));
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
                pte = set_remote_cache_cpu(pte, home);
                break;
        }

        if (noallocl2)
                pte = hv_pte_set_no_alloc_l2(pte);

        /* Simplify "no local and no l3" to "uncached" */
        if (hv_pte_get_no_alloc_l2(pte) && hv_pte_get_no_alloc_l1(pte) &&
            hv_pte_get_mode(pte) == HV_PTE_MODE_CACHE_NO_L3) {
                pte = hv_pte_set_mode(pte, HV_PTE_MODE_UNCACHED);
        }

        /* Checking this case here gives a better panic than from the hv. */
        BUG_ON(hv_pte_get_mode(pte) == 0);

        return pte;
}
EXPORT_SYMBOL(pte_set_home);
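
/*
 * Illustrative example (not from the original file): this is the same
 * pattern homecache_finv_map_page() uses above to install a mapping
 * with an explicit home.  Here "page" and "ptep" are assumed to be in
 * hand, and the home chosen is hash-for-home:
 *
 *      pte_t pte = pfn_pte(page_to_pfn(page), PAGE_KERNEL);
 *      __set_pte(ptep, pte_set_home(pte, PAGE_HOME_HASH));
 */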

/*
 * The routines in this section are the "static" versions of the normal
 * dynamic homecaching routines; they just set the home cache
 * of a kernel page once, and require a full-chip cache/TLB flush,
 * so they're not suitable for anything but infrequent use.
 */

int page_home(struct page *page)
{
        if (PageHighMem(page)) {
                return PAGE_HOME_HASH;
        } else {
                unsigned long kva = (unsigned long)page_address(page);
                return pte_to_home(*virt_to_kpte(kva));
        }
}
EXPORT_SYMBOL(page_home);

/*
 * Re-home a run of kernel lowmem pages: evict the old cached copies
 * and flush the kernel TLB mappings on all cpus, then rewrite the
 * kernel PTEs with the new home.
 */
void homecache_change_page_home(struct page *page, int order, int home)
{
        int i, pages = (1 << order);
        unsigned long kva;

        BUG_ON(PageHighMem(page));
        BUG_ON(page_count(page) > 1);
        BUG_ON(page_mapcount(page) != 0);
        kva = (unsigned long) page_address(page);
        flush_remote(0, HV_FLUSH_EVICT_L2, &cpu_cacheable_map,
                     kva, pages * PAGE_SIZE, PAGE_SIZE, cpu_online_mask,
                     NULL, 0);
        for (i = 0; i < pages; ++i, kva += PAGE_SIZE) {
                pte_t *ptep = virt_to_kpte(kva);
                pte_t pteval = *ptep;
                BUG_ON(!pte_present(pteval) || pte_huge(pteval));
                __set_pte(ptep, pte_set_home(pteval, home));
        }
}
EXPORT_SYMBOL(homecache_change_page_home);
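
/*
 * Illustrative example (not from the original file): re-home a fresh
 * order-0 lowmem page onto one cpu's L2; the cpu number 3 is
 * arbitrary, and the page must have no other users or mappings, per
 * the BUG_ONs above.  This is exactly what homecache_alloc_pages()
 * below does for you:
 *
 *      struct page *page = alloc_page(GFP_KERNEL);
 *      if (page)
 *              homecache_change_page_home(page, 0, 3);
 */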

struct page *homecache_alloc_pages(gfp_t gfp_mask,
                                   unsigned int order, int home)
{
        struct page *page;
        BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
        page = alloc_pages(gfp_mask, order);
        if (page)
                homecache_change_page_home(page, order, home);
        return page;
}
EXPORT_SYMBOL(homecache_alloc_pages);
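
/*
 * Illustrative usage (not from the original file): allocate a single
 * page homed on the current cpu and release it later.  The free
 * helpers below reset the home to hash-for-home before returning the
 * page to the allocator:
 *
 *      struct page *page = homecache_alloc_pages(GFP_KERNEL, 0,
 *                                                raw_smp_processor_id());
 *      if (page) {
 *              ... use the page ...
 *              __homecache_free_pages(page, 0);
 *      }
 */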

struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
                                        unsigned int order, int home)
{
        struct page *page;
        BUG_ON(gfp_mask & __GFP_HIGHMEM);   /* must be lowmem */
        page = alloc_pages_node(nid, gfp_mask, order);
        if (page)
                homecache_change_page_home(page, order, home);
        return page;
}

/* Restore the default hash-for-home before freeing the page. */
void __homecache_free_pages(struct page *page, unsigned int order)
{
        if (put_page_testzero(page)) {
                homecache_change_page_home(page, order, PAGE_HOME_HASH);
                if (order == 0) {
                        free_hot_cold_page(page, false);
                } else {
                        init_page_count(page);
                        __free_pages(page, order);
                }
        }
}
EXPORT_SYMBOL(__homecache_free_pages);

/* As above, but takes a kernel virtual address instead of a page. */
void homecache_free_pages(unsigned long addr, unsigned int order)
{
        if (addr != 0) {
                VM_BUG_ON(!virt_addr_valid((void *)addr));
                __homecache_free_pages(virt_to_page((void *)addr), order);
        }
}
EXPORT_SYMBOL(homecache_free_pages);