/*
 * Copyright (C) 2008,2009,2010,2011 Imagination Technologies Ltd.
 *
 * Meta 2 enhanced mode MMU handling code.
 *
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/bootmem.h>
#include <linux/syscore_ops.h>

#include <asm/mmu.h>
#include <asm/mmu_context.h>

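/*
 * mmu_read_first_level_page() - read the first level (pgd) entry covering
 * @vaddr straight from the hardware MMU table.
 *
 * Returns the raw entry value, or 0 if @vaddr lies outside the range
 * covered by the linear table.
 */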
unsigned long mmu_read_first_level_page(unsigned long vaddr)
{
	unsigned int cpu = hard_processor_id();
	unsigned long offset, linear_base, linear_limit;
	unsigned int phys0;
	pgd_t *pgd, entry;

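	/*
	 * Global space addresses have bit 31 set; strip it so the table
	 * offset below is computed from the start of the table.
	 */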
	if (is_global_space(vaddr))
		vaddr &= ~0x80000000;

	offset = vaddr >> PGDIR_SHIFT;

	phys0 = metag_in32(mmu_phys0_addr(cpu));

	/* Top bit of linear base is always zero. */
	linear_base = (phys0 >> PGDIR_SHIFT) & 0x1ff;

	/* Limit in the range 0 (4MB) to 9 (2GB). */
	linear_limit = 1 << ((phys0 >> 8) & 0xf);
	linear_limit += linear_base;

	/*
	 * If offset is below linear base or above the limit then no
	 * mapping exists.
	 */
	if (offset < linear_base || offset > linear_limit)
		return 0;

	offset -= linear_base;
	pgd = (pgd_t *)mmu_get_base();
	entry = pgd[offset];

	return pgd_val(entry);
}
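
/*
 * mmu_read_second_level_page() - read the second level (pte) entry for
 * @vaddr via the Meta 2 CACHERD builtin on the page-aligned address.
 */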
unsigned long mmu_read_second_level_page(unsigned long vaddr)
{
	return __builtin_meta2_cacherd((void *)(vaddr & PAGE_MASK));
}
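
/*
 * mmu_get_base() - return the virtual address, within the linear system
 * memory window, of the top level MMU table for this hardware thread.
 */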
unsigned long mmu_get_base(void)
{
	unsigned int cpu = hard_processor_id();
	unsigned long stride;

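	/*
	 * Each hardware thread's table mapping is a fixed stride into the
	 * linear system memory region.
	 */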
	stride = cpu * LINSYSMEMTnX_STRIDE;

	/*
	 * Bits 18:2 of the MMCU_TnLocal_TABLE_PHYS1 register should be
	 * used as an offset to the start of the top-level pgd table.
	 */
	stride += (metag_in32(mmu_phys1_addr(cpu)) & 0x7fffc);

	if (is_global_space(PAGE_OFFSET))
		stride += LINSYSMEMTXG_OFFSET;

	return LINSYSMEMT0L_BASE + stride;
}

#define FIRST_LEVEL_MASK	0xffffffc0
#define SECOND_LEVEL_MASK	0xfffff000
#define SECOND_LEVEL_ALIGN	64
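
/* Ensure every present MMU table region has priv protection set. */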
static void repriv_mmu_tables(void)
{
	unsigned long phys0_addr;
	unsigned int g;

	/*
	 * Check that all the mmu table regions are priv protected, and if not
	 * fix them and emit a warning. If we left them without priv protection
	 * then userland processes would have access to a 2M window into
	 * physical memory near where the page tables are.
	 */
	phys0_addr = MMCU_T0LOCAL_TABLE_PHYS0;
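	/*
	 * First pass covers the four per-thread local tables, the second
	 * pass the global ones.
	 */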
	for (g = 0; g < 2; ++g) {
		unsigned int t, phys0;
		unsigned long flags;

		for (t = 0; t < 4; ++t) {
			__global_lock2(flags);
			phys0 = metag_in32(phys0_addr);
			if ((phys0 & _PAGE_PRESENT) && !(phys0 & _PAGE_PRIV)) {
				pr_warn("Fixing priv protection on T%d %s MMU table region\n",
					t,
					g ? "global" : "local");
				phys0 |= _PAGE_PRIV;
				metag_out32(phys0, phys0_addr);
			}
			__global_unlock2(flags);

			phys0_addr += MMCU_TnX_TABLE_PHYSX_STRIDE;
		}

		phys0_addr += MMCU_TXG_TABLE_PHYSX_OFFSET
				- 4*MMCU_TnX_TABLE_PHYSX_STRIDE;
	}
}

#ifdef CONFIG_METAG_SUSPEND_MEM
static void mmu_resume(void)
{
	/*
	 * If a full suspend to RAM has happened then the original bad MMU table
	 * priv may have been restored, so repriv them again.
	 */
	repriv_mmu_tables();
}
#else
#define mmu_resume NULL
#endif	/* CONFIG_METAG_SUSPEND_MEM */
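
/* Re-apply MMU table priv protection when resuming from suspend to RAM. */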
static struct syscore_ops mmu_syscore_ops = {
	.resume = mmu_resume,
};
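
/*
 * mmu_init() - set up the kernel's MMU page tables.
 * @mem_end: end address of kernel memory.
 *
 * Copies the first level entries already programmed into the hardware MMU
 * tables into swapper_pg_dir so every process inherits the kernel mappings,
 * and with CONFIG_KERNEL_4M_PAGES remaps the kernel with 4MB pages.
 */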
void __init mmu_init(unsigned long mem_end)
{
	unsigned long entry, addr;
	pgd_t *p_swapper_pg_dir;
#ifdef CONFIG_KERNEL_4M_PAGES
	unsigned long mem_size = mem_end - PAGE_OFFSET;
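	/* One 4MB mapping for each 4MB of kernel memory. */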
	unsigned int pages = DIV_ROUND_UP(mem_size, 1 << 22);
	unsigned int second_level_entry = 0;
	unsigned long *second_level_table;
#endif

	/*
	 * Now copy over any MMU pgd entries already in the mmu page tables
	 * over to our root init process (swapper_pg_dir) map. This map is
	 * then inherited by all other processes, which means all processes
	 * inherit a map of the kernel space.
	 */
	addr = META_MEMORY_BASE;
	entry = pgd_index(META_MEMORY_BASE);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (entry < (PTRS_PER_PGD - pgd_index(META_MEMORY_BASE))) {
		unsigned long pgd_entry;

		/* copy over the current MMU value */
		pgd_entry = mmu_read_first_level_page(addr);
		pgd_val(*p_swapper_pg_dir) = pgd_entry;

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;
		entry++;
	}

#ifdef CONFIG_KERNEL_4M_PAGES
	/*
	 * At this point we can also map the kernel with 4MB pages to
	 * reduce TLB pressure.
	 */
	second_level_table = alloc_bootmem_pages(SECOND_LEVEL_ALIGN * pages);

	addr = PAGE_OFFSET;
	entry = pgd_index(PAGE_OFFSET);
	p_swapper_pg_dir = pgd_offset_k(0) + entry;

	while (pages > 0) {
		unsigned long phys_addr, second_level_phys;
		pte_t *pte = (pte_t *)&second_level_table[second_level_entry];

		phys_addr = __pa(addr);
		second_level_phys = __pa(pte);

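		/*
		 * First level entry: physical address of the second level
		 * block, marked present with a 4MB page size.
		 */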
		pgd_val(*p_swapper_pg_dir) = ((second_level_phys &
					       FIRST_LEVEL_MASK) |
					      _PAGE_SZ_4M |
					      _PAGE_PRESENT);

		pte_val(*pte) = ((phys_addr & SECOND_LEVEL_MASK) |
				 _PAGE_PRESENT | _PAGE_DIRTY |
				 _PAGE_ACCESSED | _PAGE_WRITE |
				 _PAGE_CACHEABLE | _PAGE_KERNEL);

		p_swapper_pg_dir++;
		addr += PGDIR_SIZE;

		/* Second level pages must be 64byte aligned. */
		second_level_entry += (SECOND_LEVEL_ALIGN /
				       sizeof(unsigned long));
		pages--;
	}

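	/*
	 * Switch this thread's MMU onto swapper_pg_dir and flush any stale
	 * TLB entries.
	 */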
	load_pgd(swapper_pg_dir, hard_processor_id());
	flush_tlb_all();
#endif

	repriv_mmu_tables();

	register_syscore_ops(&mmu_syscore_ops);
}