homecache.h

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Handle issues around the Tile "home cache" model of coherence.
 */

#ifndef _ASM_TILE_HOMECACHE_H
#define _ASM_TILE_HOMECACHE_H

#include <asm/page.h>
#include <linux/cpumask.h>

struct page;
struct task_struct;
struct vm_area_struct;
struct zone;

/*
 * Coherence point for the page is its memory controller.
 * It is not present in any cache (L1 or L2).
 */
#define PAGE_HOME_UNCACHED -1

/*
 * Is this page immutable (unwritable) and thus able to be cached more
 * widely than would otherwise be possible?  This means we have "nc" set.
 */
#define PAGE_HOME_IMMUTABLE -2

/*
 * Each cpu considers its own cache to be the home for the page,
 * which makes it incoherent.
 */
#define PAGE_HOME_INCOHERENT -3

/* Home for the page is distributed via hash-for-home. */
#define PAGE_HOME_HASH -4

/* Support wrapper to use instead of explicit hv_flush_remote(). */
extern void flush_remote(unsigned long cache_pfn, unsigned long cache_length,
			 const struct cpumask *cache_cpumask,
			 HV_VirtAddr tlb_va, unsigned long tlb_length,
			 unsigned long tlb_pgsize,
			 const struct cpumask *tlb_cpumask,
			 HV_Remote_ASID *asids, int asidcount);
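
/*
 * Illustrative sketch only (not part of the original header): one
 * plausible use of the wrapper above is a kernel-VA TLB shootdown on
 * all online cpus with no cache-line flushing.  Treating zero/NULL
 * cache arguments as "no cache work" is an assumption drawn from the
 * prototype, and the helper name is hypothetical.
 */
static inline void example_flush_kernel_tlb_range(unsigned long start,
						  unsigned long len)
{
	flush_remote(0, 0, NULL, (HV_VirtAddr) start, len, PAGE_SIZE,
		     cpu_online_mask, NULL, 0);
}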

/* Set homing-related bits in a PTE (can also pass a pgprot_t). */
extern pte_t pte_set_home(pte_t pte, int home);
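
/*
 * Illustrative sketch only: mark a PTE as hash-for-home before it is
 * installed, using the PAGE_HOME_HASH value defined above.  The helper
 * name is hypothetical.
 */
static inline pte_t example_pte_hash_for_home(pte_t pte)
{
	return pte_set_home(pte, PAGE_HOME_HASH);
}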

/* Do a cache eviction on the specified cpus. */
extern void homecache_evict(const struct cpumask *mask);
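
/*
 * Illustrative sketch only: evict a single cpu's cache, e.g. before
 * repurposing memory that cpu had been homing.  cpumask_of() comes
 * from <linux/cpumask.h>, included above; the helper name is
 * hypothetical.
 */
static inline void example_evict_one_cpu(int cpu)
{
	homecache_evict(cpumask_of(cpu));
}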

/*
 * Change a kernel page's homecache.  It must not be mapped in user space.
 * If !CONFIG_HOMECACHE, only usable on LOWMEM, and can only be called when
 * no other cpu can reference the page, and causes a full-chip cache/TLB flush.
 */
extern void homecache_change_page_home(struct page *, int order, int home);
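
/*
 * Illustrative sketch only: re-home a single kernel page so that its
 * coherence point is the memory controller (no cache), using the
 * PAGE_HOME_UNCACHED value defined above.  Order 0 means one page;
 * the helper name is hypothetical.
 */
static inline void example_make_page_uncached(struct page *page)
{
	homecache_change_page_home(page, 0, PAGE_HOME_UNCACHED);
}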

/*
 * Flush a page out of whatever cache(s) it is in.
 * This is more than just finv, since it properly handles waiting
 * for the data to reach memory, but it can be quite
 * heavyweight, particularly on incoherent or immutable memory.
 */
extern void homecache_finv_page(struct page *);

/*
 * Flush a page out of the specified home cache.
 * Note that the specified home need not be the actual home of the page,
 * as for example might be the case when coordinating with I/O devices.
 */
extern void homecache_finv_map_page(struct page *, int home);
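
/*
 * Illustrative sketch only: flush a page out of the hash-for-home
 * distributed cache before a device reads the underlying memory,
 * regardless of where the page is nominally homed.  Whether this is
 * the right home value for a given device is an assumption; the
 * helper name is hypothetical.
 */
static inline void example_finv_for_device(struct page *page)
{
	homecache_finv_map_page(page, PAGE_HOME_HASH);
}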

/*
 * Allocate a page with the given GFP flags, home, and optionally
 * node.  These routines are actually just wrappers around the normal
 * alloc_pages() / alloc_pages_node() functions, which set and clear
 * a per-cpu variable to communicate with homecache_new_kernel_page().
 * If !CONFIG_HOMECACHE, uses homecache_change_page_home().
 */
extern struct page *homecache_alloc_pages(gfp_t gfp_mask,
					  unsigned int order, int home);
extern struct page *homecache_alloc_pages_node(int nid, gfp_t gfp_mask,
					       unsigned int order, int home);
#define homecache_alloc_page(gfp_mask, home) \
	homecache_alloc_pages(gfp_mask, 0, home)

/*
 * These routines are just pass-throughs to free_pages() when
 * we support full homecaching.  If !CONFIG_HOMECACHE, then these
 * routines use homecache_change_page_home() to reset the home
 * back to the default before returning the page to the allocator.
 */
void __homecache_free_pages(struct page *, unsigned int order);
void homecache_free_pages(unsigned long addr, unsigned int order);
#define __homecache_free_page(page) __homecache_free_pages((page), 0)
#define homecache_free_page(page) homecache_free_pages((page), 0)
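
/*
 * Illustrative sketch only: allocate a single page homed on a given
 * cpu (assuming, as with pte_set_home(), that a non-negative home
 * value names a cpu) and release it through the homecache free path
 * so its home can be reset first.  The helper name is hypothetical.
 */
static inline void example_alloc_and_free_on_cpu(gfp_t gfp_mask, int cpu)
{
	struct page *page = homecache_alloc_page(gfp_mask, cpu);

	if (page) {
		/* ... use the page ... */
		__homecache_free_page(page);
	}
}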

/*
 * Report the page home for LOWMEM pages by examining their kernel PTE,
 * or for highmem pages as the default home.
 */
extern int page_home(struct page *);
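
/*
 * Illustrative sketch only: test whether a page is currently homed via
 * the hash-for-home mechanism.  The helper name is hypothetical.
 */
static inline int example_page_is_hash_for_home(struct page *page)
{
	return page_home(page) == PAGE_HOME_HASH;
}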

#define homecache_migrate_kthread() do {} while (0)

#define homecache_kpte_lock() 0
#define homecache_kpte_unlock(flags) do {} while (0)

#endif /* _ASM_TILE_HOMECACHE_H */