pgalloc.h 2.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112
  1. /*
  2. * linux/arch/unicore32/include/asm/pgalloc.h
  3. *
  4. * Code specific to PKUnity SoC and UniCore ISA
  5. *
  6. * Copyright (C) 2001-2010 GUAN Xue-tao
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #ifndef __UNICORE_PGALLOC_H__
  13. #define __UNICORE_PGALLOC_H__
  14. #include <asm/pgtable-hwdef.h>
  15. #include <asm/processor.h>
  16. #include <asm/cacheflush.h>
  17. #include <asm/tlbflush.h>
/* unicore32 keeps no page-table quicklist cache, so there is nothing to reap. */
#define check_pgt_cache()		do { } while (0)

/* First-level (pmd) descriptor bits marking a present pointer to a PTE table. */
#define _PAGE_USER_TABLE	(PMD_TYPE_TABLE | PMD_PRESENT)
#define _PAGE_KERNEL_TABLE	(PMD_TYPE_TABLE | PMD_PRESENT)

/* pgd setup copies kernel mappings and is done out of line in arch code. */
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);

#define pgd_alloc(mm)			get_pgd_slow(mm)
#define pgd_free(mm, pgd)		free_pgd_slow(mm, pgd)

/* GFP flags for page-table pages: zero-filled, untracked, retry hard on OOM. */
#define PGALLOC_GFP	(GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
  26. /*
  27. * Allocate one PTE table.
  28. */
  29. static inline pte_t *
  30. pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
  31. {
  32. pte_t *pte;
  33. pte = (pte_t *)__get_free_page(PGALLOC_GFP);
  34. if (pte)
  35. clean_dcache_area(pte, PTRS_PER_PTE * sizeof(pte_t));
  36. return pte;
  37. }
  38. static inline pgtable_t
  39. pte_alloc_one(struct mm_struct *mm, unsigned long addr)
  40. {
  41. struct page *pte;
  42. pte = alloc_pages(PGALLOC_GFP, 0);
  43. if (!pte)
  44. return NULL;
  45. if (!PageHighMem(pte)) {
  46. void *page = page_address(pte);
  47. clean_dcache_area(page, PTRS_PER_PTE * sizeof(pte_t));
  48. }
  49. if (!pgtable_page_ctor(pte)) {
  50. __free_page(pte);
  51. }
  52. return pte;
  53. }
  54. /*
  55. * Free one PTE table.
  56. */
  57. static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
  58. {
  59. if (pte)
  60. free_page((unsigned long)pte);
  61. }
  62. static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
  63. {
  64. pgtable_page_dtor(pte);
  65. __free_page(pte);
  66. }
  67. static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
  68. {
  69. set_pmd(pmdp, __pmd(pmdval));
  70. flush_pmd_entry(pmdp);
  71. }
  72. /*
  73. * Populate the pmdp entry with a pointer to the pte. This pmd is part
  74. * of the mm address space.
  75. */
  76. static inline void
  77. pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
  78. {
  79. unsigned long pte_ptr = (unsigned long)ptep;
  80. /*
  81. * The pmd must be loaded with the physical
  82. * address of the PTE table
  83. */
  84. __pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
  85. }
  86. static inline void
  87. pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
  88. {
  89. __pmd_populate(pmdp,
  90. page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
  91. }
  92. #define pmd_pgtable(pmd) pmd_page(pmd)
  93. #endif