/* pte-8xx.h */
  1. #ifndef _ASM_POWERPC_PTE_8xx_H
  2. #define _ASM_POWERPC_PTE_8xx_H
  3. #ifdef __KERNEL__
  4. /*
  5. * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk.
  6. * We also use the two level tables, but we can put the real bits in them
  7. * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0,
  8. * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has
  9. * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit
  10. * based upon user/super access. The TLB does not have accessed nor write
  11. * protect. We assume that if the TLB get loaded with an entry it is
  12. * accessed, and overload the changed bit for write protect. We use
  13. * two bits in the software pte that are supposed to be set to zero in
  14. * the TLB entry (24 and 25) for these indicators. Although the level 1
  15. * descriptor contains the guarded and writethrough/copyback bits, we can
  16. * set these at the page level since they get copied from the Mx_TWC
  17. * register when the TLB entry is loaded. We will use bit 27 for guard, since
  18. * that is where it exists in the MD_TWC, and bit 26 for writethrough.
  19. * These will get masked from the level 2 descriptor at TLB load time, and
  20. * copied to the MD_TWC before it gets loaded.
  21. * Large page sizes added. We currently support two sizes, 4K and 8M.
  22. * This also allows a TLB hander optimization because we can directly
  23. * load the PMD into MD_TWC. The 8M pages are only used for kernel
  24. * mapping of well known areas. The PMD (PGD) entries contain control
  25. * flags in addition to the address, so care must be taken that the
  26. * software no longer assumes these are only pointers.
  27. */
  28. /* Definitions for 8xx embedded chips. */
  29. #define _PAGE_PRESENT 0x0001 /* Page is valid */
  30. #define _PAGE_NO_CACHE 0x0002 /* I: cache inhibit */
  31. #define _PAGE_SHARED 0x0004 /* No ASID (context) compare */
  32. #define _PAGE_SPECIAL 0x0008 /* SW entry, forced to 0 by the TLB miss */
  33. #define _PAGE_DIRTY 0x0100 /* C: page changed */
  34. /* These 4 software bits must be masked out when the L2 entry is loaded
  35. * into the TLB.
  36. */
  37. #define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */
  38. #define _PAGE_USER 0x0020 /* Copied to L1 APG lsb */
  39. #define _PAGE_EXEC 0x0040 /* Copied to L1 APG */
  40. #define _PAGE_WRITETHRU 0x0080 /* software: caching is write through */
  41. #define _PAGE_ACCESSED 0x0800 /* software: page referenced */
  42. #define _PAGE_RO 0x0600 /* Supervisor RO, User no access */
  43. #define _PMD_PRESENT 0x0001
  44. #define _PMD_BAD 0x0ff0
  45. #define _PMD_PAGE_MASK 0x000c
  46. #define _PMD_PAGE_8M 0x000c
  47. /* Until my rework is finished, 8xx still needs atomic PTE updates */
  48. #define PTE_ATOMIC_UPDATES 1
  49. /* We need to add _PAGE_SHARED to kernel pages */
  50. #define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_RO)
  51. #define _PAGE_KERNEL_ROX (_PAGE_SHARED | _PAGE_RO | _PAGE_EXEC)
  52. #define _PAGE_KERNEL_RW (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
  53. _PAGE_HWWRITE)
  54. #define _PAGE_KERNEL_RWX (_PAGE_SHARED | _PAGE_DIRTY | _PAGE_RW | \
  55. _PAGE_HWWRITE | _PAGE_EXEC)
  56. #endif /* __KERNEL__ */
  57. #endif /* _ASM_POWERPC_PTE_8xx_H */