/* MN10300 Page table management
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/quicklist.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	local_flush_tlb_one(vaddr);
}
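
/*
 * Illustrative sketch of a call (hypothetical variables "base" and "paddr",
 * not an actual caller): both arguments must be aligned first, otherwise
 * the checks above just print an error and return:
 *
 *	unsigned long vaddr = base & ~(PMD_SIZE - 1);
 *	unsigned long pfn   = (paddr >> PAGE_SHIFT) & ~(PTRS_PER_PTE - 1UL);
 *
 *	set_pmd_pfn(vaddr, pfn, PAGE_KERNEL);
 */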

/*
 * Allocate and clear a page to hold a kernel PTE table.
 */
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}
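
/*
 * Allocate a page to hold a userspace PTE table.  With CONFIG_HIGHPTE the
 * page may be placed in highmem.  pgtable_page_ctor() sets up the split
 * page-table lock and page-table accounting for the page; if that fails,
 * the page is freed again and the allocation fails.
 */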
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	if (!pte)
		return NULL;
	clear_highpage(pte);
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- nyc
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;
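
/*
 * The pgd list is threaded through the page structures backing the pgds:
 * page->index points to the next page in the list, and page_private()
 * points back at the previous element's "next" slot (or at pgd_list itself
 * for the head), so pgd_list_del() can unlink an entry without walking the
 * list.
 */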
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long) &page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long) &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *) page->index;
	pprev = (struct page **) page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long) pprev);
}
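
/*
 * Initialise a new pgd: the kernel portion is copied from swapper_pg_dir so
 * kernel mappings are shared.  With a two-level layout (PTRS_PER_PMD == 1)
 * the pgd is also added to pgd_list under pgd_lock, so that changes to the
 * kernel page tables can be propagated to it, and the user portion is
 * cleared.
 */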
void pgd_ctor(void *pgd)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
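
/*
 * pgds are managed through per-CPU quicklist 0, so recently freed pgd pages
 * can be reused cheaply: pgd_ctor() initialises a newly allocated pgd, and
 * pgd_dtor() unlinks one from pgd_list before its page is finally released.
 * check_pgt_cache() below trims surplus pages out of the quicklist.
 */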
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	quicklist_free(0, pgd_dtor, pgd);
}

void __init pgtable_cache_init(void)
{
}

void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}