pgtable_32.h

#ifndef _SPARC_PGTABLE_H
#define _SPARC_PGTABLE_H

/* asm/pgtable.h:  Defines and functions used to work
 * with Sparc page tables.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 */

#include <linux/const.h>

#ifndef __ASSEMBLY__
#include <asm-generic/4level-fixup.h>

#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <asm/types.h>
#include <asm/pgtsrmmu.h>
#include <asm/vaddrs.h>
#include <asm/oplib.h>
#include <asm/cpu_type.h>

struct vm_area_struct;
struct page;

void load_mmu(void);
unsigned long calc_highpages(void);
unsigned long __init bootmem_init(unsigned long *pages_avail);

#define pte_ERROR(e)	__builtin_trap()
#define pmd_ERROR(e)	__builtin_trap()
#define pgd_ERROR(e)	__builtin_trap()

#define PMD_SHIFT		22
#define PMD_SIZE		(1UL << PMD_SHIFT)
#define PMD_MASK		(~(PMD_SIZE-1))
#define PMD_ALIGN(__addr)	(((__addr) + ~PMD_MASK) & PMD_MASK)
#define PGDIR_SHIFT		SRMMU_PGDIR_SHIFT
#define PGDIR_SIZE		SRMMU_PGDIR_SIZE
#define PGDIR_MASK		SRMMU_PGDIR_MASK
#define PTRS_PER_PTE		1024
#define PTRS_PER_PMD		SRMMU_PTRS_PER_PMD
#define PTRS_PER_PGD		SRMMU_PTRS_PER_PGD
#define USER_PTRS_PER_PGD	PAGE_OFFSET / SRMMU_PGDIR_SIZE
#define FIRST_USER_ADDRESS	0UL
#define PTE_SIZE		(PTRS_PER_PTE*4)

#define PAGE_NONE	SRMMU_PAGE_NONE
#define PAGE_SHARED	SRMMU_PAGE_SHARED
#define PAGE_COPY	SRMMU_PAGE_COPY
#define PAGE_READONLY	SRMMU_PAGE_RDONLY
#define PAGE_KERNEL	SRMMU_PAGE_KERNEL

/* Top-level page directory - dummy used by init-mm.
 * srmmu.c will assign the real one (which is dynamically sized) */
#define swapper_pg_dir NULL

void paging_init(void);

extern unsigned long ptr_in_current_pgd;

/*         xwr */
#define __P000	PAGE_NONE
#define __P001	PAGE_READONLY
#define __P010	PAGE_COPY
#define __P011	PAGE_COPY
#define __P100	PAGE_READONLY
#define __P101	PAGE_READONLY
#define __P110	PAGE_COPY
#define __P111	PAGE_COPY

#define __S000	PAGE_NONE
#define __S001	PAGE_READONLY
#define __S010	PAGE_SHARED
#define __S011	PAGE_SHARED
#define __S100	PAGE_READONLY
#define __S101	PAGE_READONLY
#define __S110	PAGE_SHARED
#define __S111	PAGE_SHARED
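
/*
 * Illustrative note, not part of the original header: the generic mm code
 * indexes these macros by the VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits,
 * roughly via a table such as
 *
 *	pgprot_t protection_map[16] = {
 *		__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 *		__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 *	};
 *
 * so the index is roughly (shared << 3) | (exec << 2) | (write << 1) | read,
 * matching the "xwr" naming of the macros above.
 */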
/* First physical page can be anywhere, the following is needed so that
 * va-->pa and vice versa conversions work properly without performance
 * hit for all __pa()/__va() operations.
 */
extern unsigned long phys_base;
extern unsigned long pfn_base;

/*
 * ZERO_PAGE is a global shared page that is always zero: used
 * for zero-mapped memory areas etc..
 */
extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))

/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" :
			"=&r" (value) : "0" (value), "r" (addr) : "memory");
	return value;
}

/* Certain architectures need to do special things when pte's
 * within a page table are directly modified.  Thus, the following
 * hook is made available.
 */
static inline void set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}

#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
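
/*
 * Illustrative sketch, not part of the original header: because
 * srmmu_swap() returns the previous contents of the word, a pte can be
 * replaced and its old value observed in a single atomic step.  The
 * helper name example_xchg_pte() is hypothetical.
 */
static inline pte_t example_xchg_pte(pte_t *ptep, pte_t newval)
{
	/* swap in the new value, hand back the pte that was there before */
	return __pte(srmmu_swap((unsigned long *)ptep, pte_val(newval)));
}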
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static inline struct page *pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long pgd_page_vaddr(pgd_t pgd)
{
	if (srmmu_device_memory(pgd_val(pgd))) {
		return ~0;
	} else {
		unsigned long v = pgd_val(pgd) & SRMMU_PTD_PMASK;
		return (unsigned long)__nocache_va(v << 4);
	}
}

static inline int pte_present(pte_t pte)
{
	return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE);
}

static inline int pte_none(pte_t pte)
{
	return !pte_val(pte);
}

static inline void __pte_clear(pte_t *ptep)
{
	set_pte(ptep, __pte(0));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	__pte_clear(ptep);
}

static inline int pmd_bad(pmd_t pmd)
{
	return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pmd_present(pmd_t pmd)
{
	return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline int pmd_none(pmd_t pmd)
{
	return !pmd_val(pmd);
}

static inline void pmd_clear(pmd_t *pmdp)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int pgd_none(pgd_t pgd)
{
	return !(pgd_val(pgd) & 0xFFFFFFF);
}

static inline int pgd_bad(pgd_t pgd)
{
	return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD;
}

static inline int pgd_present(pgd_t pgd)
{
	return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD);
}

static inline void pgd_clear(pgd_t *pgdp)
{
	set_pte((pte_t *)pgdp, __pte(0));
}
/*
 * The following only work if pte_present() is true.
 * Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return pte_val(pte) & SRMMU_WRITE;
}

static inline int pte_dirty(pte_t pte)
{
	return pte_val(pte) & SRMMU_DIRTY;
}

static inline int pte_young(pte_t pte)
{
	return pte_val(pte) & SRMMU_REF;
}

static inline int pte_special(pte_t pte)
{
	return 0;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_WRITE);
}

static inline pte_t pte_mkclean(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_DIRTY);
}

static inline pte_t pte_mkold(pte_t pte)
{
	return __pte(pte_val(pte) & ~SRMMU_REF);
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_WRITE);
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_DIRTY);
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	return __pte(pte_val(pte) | SRMMU_REF);
}

#define pte_mkspecial(pte)	(pte)
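
/*
 * Illustrative sketch, not part of the original header: the pte_mk*()
 * helpers above are pure bit operations on the pte value, so they can be
 * composed freely.  example_mkwrite_dirty_young() is hypothetical.
 */
static inline pte_t example_mkwrite_dirty_young(pte_t pte)
{
	/* sets SRMMU_WRITE, SRMMU_DIRTY and SRMMU_REF in one expression */
	return pte_mkyoung(pte_mkdirty(pte_mkwrite(pte)));
}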
#define pfn_pte(pfn, prot)	mk_pte(pfn_to_page(pfn), prot)

static inline unsigned long pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the pte over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

#define pte_page(pte)	pfn_to_page(pte_pfn(pte))

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_phys(unsigned long page, pgprot_t pgprot)
{
	return __pte(((page) >> 4) | pgprot_val(pgprot));
}

static inline pte_t mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{
	return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot));
}
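
/*
 * Illustrative sketch, not part of the original header: on SRMMU the
 * physical page number sits in the pte shifted right by four bits
 * relative to its usual position, so mk_pte() and pte_pfn() are inverse
 * operations for ordinary memory.  example_pte_roundtrip() is
 * hypothetical.
 */
static inline int example_pte_roundtrip(struct page *page)
{
	pte_t pte = mk_pte(page, PAGE_KERNEL);

	/* pte_pfn() undoes the (PAGE_SHIFT - 4) shift applied by mk_pte() */
	return pte_pfn(pte) == page_to_pfn(page);
}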
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t prot)
{
	prot &= ~__pgprot(SRMMU_CACHE);
	return prot;
}

static pte_t pte_modify(pte_t pte, pgprot_t newprot) __attribute_const__;
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	return __pte((pte_val(pte) & SRMMU_CHG_MASK) |
		pgprot_val(newprot));
}

#define pgd_index(address) ((address) >> PGDIR_SHIFT)

/* to find an entry in a page-table-directory */
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))

/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

/* Find an entry in the second-level page table.. */
static inline pmd_t *pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) pgd_page_vaddr(*dir) +
		((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address);

/*
 * This shortcut works on sun4m (and sun4d) because the nocache area is static.
 */
#define pte_offset_map(d, a)	pte_offset_kernel(d, a)
#define pte_unmap(pte)		do { } while (0)
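
/*
 * Illustrative sketch, not part of the original header: a software walk
 * from an mm down to the pte for an address, using the helpers above.
 * example_walk_pte() is hypothetical.
 */
static inline pte_t *example_walk_pte(struct mm_struct *mm, unsigned long address)
{
	pgd_t *pgd = pgd_offset(mm, address);
	pmd_t *pmd;

	if (pgd_none(*pgd) || pgd_bad(*pgd))
		return NULL;
	pmd = pmd_offset(pgd, address);
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return NULL;
	return pte_offset_kernel(pmd, address);
}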
struct seq_file;
void mmu_info(struct seq_file *m);

/* Fault handler stuff... */
#define FAULT_CODE_PROT     0x1
#define FAULT_CODE_WRITE    0x2
#define FAULT_CODE_USER     0x4

#define update_mmu_cache(vma, address, ptep) do { } while (0)

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len);
void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len);

/* Encode and de-code a swap entry */
static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

#define __pte_to_swp_entry(pte)		((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)		((pte_t) { (x).val })
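
/*
 * Illustrative sketch, not part of the original header: encoding a swap
 * entry into a pte and decoding it again is lossless as long as type and
 * offset fit within SRMMU_SWP_TYPE_MASK and SRMMU_SWP_OFF_MASK.
 * example_swp_roundtrip() is hypothetical.
 */
static inline int example_swp_roundtrip(unsigned long type, unsigned long offset)
{
	pte_t pte = __swp_entry_to_pte(__swp_entry(type, offset));
	swp_entry_t entry = __pte_to_swp_entry(pte);

	return __swp_type(entry) == type && __swp_offset(entry) == offset;
}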
static inline unsigned long
__get_phys (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return ((srmmu_get_pte (addr) & 0xffffff00) << 4);
	default:
		return 0;
	}
}

static inline int
__get_iospace (unsigned long addr)
{
	switch (sparc_cpu_model){
	case sun4m:
	case sun4d:
		return (srmmu_get_pte (addr) >> 28);
	default:
		return -1;
	}
}

extern unsigned long *sparc_valid_addr_bitmap;

/* Needs to be defined here and not in linux/mm.h, as it is arch dependent */
#define kern_addr_valid(addr) \
	(test_bit(__pa((unsigned long)(addr))>>20, sparc_valid_addr_bitmap))

/*
 * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
 * its high 4 bits.  These macros/functions put it there or get it from there.
 */
#define MK_IOSPACE_PFN(space, pfn)	(pfn | (space << (BITS_PER_LONG - 4)))
#define GET_IOSPACE(pfn)		(pfn >> (BITS_PER_LONG - 4))
#define GET_PFN(pfn)			(pfn & 0x0fffffffUL)

int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
		    unsigned long, pgprot_t);

static inline int io_remap_pfn_range(struct vm_area_struct *vma,
				     unsigned long from, unsigned long pfn,
				     unsigned long size, pgprot_t prot)
{
	unsigned long long offset, space, phys_base;

	offset = ((unsigned long long) GET_PFN(pfn)) << PAGE_SHIFT;
	space = GET_IOSPACE(pfn);
	phys_base = offset | (space << 32ULL);

	return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot);
}
#define io_remap_pfn_range io_remap_pfn_range
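
/*
 * Illustrative sketch, not part of the original header: the iospace
 * nibble rides in the top four bits of the pfn and is split back out by
 * GET_IOSPACE()/GET_PFN(), as io_remap_pfn_range() does above.
 * example_iospace_roundtrip() is hypothetical; it assumes space fits in
 * 4 bits and pfn in 28 bits.
 */
static inline int example_iospace_roundtrip(unsigned long space, unsigned long pfn)
{
	unsigned long packed = MK_IOSPACE_PFN(space, pfn);

	return GET_IOSPACE(packed) == space && GET_PFN(packed) == pfn;
}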
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
#define ptep_set_access_flags(__vma, __address, __ptep, __entry, __dirty) \
({ \
	int __changed = !pte_same(*(__ptep), __entry); \
	if (__changed) { \
		set_pte_at((__vma)->vm_mm, (__address), __ptep, __entry); \
		flush_tlb_page(__vma, __address); \
	} \
	__changed; \
})

#include <asm-generic/pgtable.h>

#endif /* !(__ASSEMBLY__) */

#define VMALLOC_START           _AC(0xfe600000,UL)
#define VMALLOC_END             _AC(0xffc00000,UL)

/* We provide our own get_unmapped_area to cope with VA holes for userland */
#define HAVE_ARCH_UNMAPPED_AREA

/*
 * No page table caches to initialise
 */
#define pgtable_cache_init()	do { } while (0)

#endif /* !(_SPARC_PGTABLE_H) */