/*
 * Virtual DMA allocation
 *
 * (C) 1999 Thomas Bogendoerfer (tsbogend@alpha.franken.de)
 *
 * 11/26/2000 -- disabled the existing code because it didn't work for
 * me in 2.4. Replaced with a significantly more primitive version
 * similar to the sun3 code. the old functionality was probably more
 * desirable, but.... -- Sam Creasey (sammy@oh.verio.com)
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/vmalloc.h>

#include <asm/sun3x.h>
#include <asm/dvma.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>

/* IOMMU support */

#define IOMMU_ADDR_MASK         0x03ffe000
#define IOMMU_CACHE_INHIBIT     0x00000040
#define IOMMU_FULL_BLOCK        0x00000020
#define IOMMU_MODIFIED          0x00000010
#define IOMMU_USED              0x00000008
#define IOMMU_WRITE_PROTECT     0x00000004
#define IOMMU_DT_MASK           0x00000003
#define IOMMU_DT_INVALID        0x00000000
#define IOMMU_DT_VALID          0x00000001
#define IOMMU_DT_BAD            0x00000002
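
/*
 * Worked example (a sketch, not from the original source): with the 8K
 * DVMA page size implied by the lowest set bit of IOMMU_ADDR_MASK
 * (0x2000), mapping physical page 0x01234000 produces the descriptor
 *
 *      (0x01234000 & IOMMU_ADDR_MASK) | IOMMU_DT_VALID == 0x01234001
 *
 * i.e. bits 13-25 hold the physical page, the low two bits hold the
 * descriptor type, and the bits in between are the status/control
 * flags defined above.
 */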

static volatile unsigned long *iommu_pte = (unsigned long *)SUN3X_IOMMU;

#define dvma_entry_paddr(index)         (iommu_pte[index] & IOMMU_ADDR_MASK)
#define dvma_entry_vaddr(index,paddr)   ((index << DVMA_PAGE_SHIFT) |   \
                                         (paddr & (DVMA_PAGE_SIZE-1)))
#if 0
#define dvma_entry_set(index,addr)      (iommu_pte[index] =             \
                                         (addr & IOMMU_ADDR_MASK) |     \
                                         IOMMU_DT_VALID | IOMMU_CACHE_INHIBIT)
#else
#define dvma_entry_set(index,addr)      (iommu_pte[index] =             \
                                         (addr & IOMMU_ADDR_MASK) |     \
                                         IOMMU_DT_VALID)
#endif
#define dvma_entry_clr(index)           (iommu_pte[index] = IOMMU_DT_INVALID)
#define dvma_entry_hash(addr)           ((addr >> DVMA_PAGE_SHIFT) ^    \
                                         ((addr & 0x03c00000) >>        \
                                          (DVMA_PAGE_SHIFT+4)))
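
/*
 * Note: dvma_entry_vaddr() and dvma_entry_hash() are not referenced
 * anywhere in this file; they look like leftovers from the original
 * allocator that the 11/26/2000 note above says was disabled.  As an
 * illustration of the hash (assuming DVMA_PAGE_SHIFT == 13), for
 * addr == 0x00c04000:
 *
 *      (0x00c04000 >> 13) ^ ((0x00c04000 & 0x03c00000) >> 17)
 *          == 0x602 ^ 0x60 == 0x662
 */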

#undef DEBUG

#ifdef DEBUG
/* code to print out a dvma mapping for debugging purposes */
void dvma_print (unsigned long dvma_addr)
{
        unsigned long index;

        index = dvma_addr >> DVMA_PAGE_SHIFT;

        printk("idx %lx dvma_addr %08lx paddr %08lx\n", index, dvma_addr,
               dvma_entry_paddr(index));
}
#endif

/* create a virtual mapping for a page assigned within the IOMMU
   so that the cpu can reach it easily */
inline int dvma_map_cpu(unsigned long kaddr,
                        unsigned long vaddr, int len)
{
        pgd_t *pgd;
        unsigned long end;
        int ret = 0;

        kaddr &= PAGE_MASK;
        vaddr &= PAGE_MASK;

        end = PAGE_ALIGN(vaddr + len);

#ifdef DEBUG
        printk("dvma: mapping kern %08lx to virt %08lx\n",
               kaddr, vaddr);
#endif
        pgd = pgd_offset_k(vaddr);
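
        /*
         * Walk the kernel page tables over [vaddr, end): the outer
         * loop covers one pgd entry per pass, the middle loop one pmd
         * entry, and the inner loop fills in one pte per page.  end2
         * and end3 clamp each inner walk to the end of the current
         * pgd/pmd region (or to the overall end, whichever comes
         * first) so the pte pointer is only ever advanced within a
         * single table.  Note that pgd itself is never advanced;
         * presumably the DVMA window always fits within one pgd entry.
         */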

        do {
                pmd_t *pmd;
                unsigned long end2;

                if((pmd = pmd_alloc(&init_mm, pgd, vaddr)) == NULL) {
                        ret = -ENOMEM;
                        goto out;
                }

                if((end & PGDIR_MASK) > (vaddr & PGDIR_MASK))
                        end2 = (vaddr + (PGDIR_SIZE-1)) & PGDIR_MASK;
                else
                        end2 = end;

                do {
                        pte_t *pte;
                        unsigned long end3;

                        if((pte = pte_alloc_kernel(pmd, vaddr)) == NULL) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        if((end2 & PMD_MASK) > (vaddr & PMD_MASK))
                                end3 = (vaddr + (PMD_SIZE-1)) & PMD_MASK;
                        else
                                end3 = end2;

                        do {
#ifdef DEBUG
                                printk("mapping %08lx phys to %08lx\n",
                                       __pa(kaddr), vaddr);
#endif
                                set_pte(pte, pfn_pte(virt_to_pfn(kaddr),
                                                     PAGE_KERNEL));
                                pte++;
                                kaddr += PAGE_SIZE;
                                vaddr += PAGE_SIZE;
                        } while(vaddr < end3);

                } while(vaddr < end2);

        } while(vaddr < end);

        flush_tlb_all();

 out:
        return ret;
}
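
/* The IOMMU half of the mapping: dvma_map_cpu() above gives the kernel
   a view of the buffer through its own page tables; the device instead
   issues bus addresses, which the sun3x IOMMU translates through
   iommu_pte[].  Each DVMA page of the bus range is pointed at the
   corresponding physical page of the buffer. */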
inline int dvma_map_iommu(unsigned long kaddr, unsigned long baddr,
                          int len)
{
        unsigned long end, index;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = ((baddr+len) >> DVMA_PAGE_SHIFT);

        if(len & ~DVMA_PAGE_MASK)
                end++;

        for(; index < end ; index++) {
//                if(dvma_entry_use(index))
//                        BUG();
//                printk("mapping pa %lx to ba %lx\n", __pa(kaddr), index << DVMA_PAGE_SHIFT);

                dvma_entry_set(index, __pa(kaddr));

                iommu_pte[index] |= IOMMU_FULL_BLOCK;

//                dvma_entry_inc(index);

                kaddr += DVMA_PAGE_SIZE;
        }

#ifdef DEBUG
        for(index = (baddr >> DVMA_PAGE_SHIFT); index < end; index++)
                dvma_print(index << DVMA_PAGE_SHIFT);
#endif
        return 0;
}
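
/*
 * Example of the page arithmetic above (assuming 8K DVMA pages):
 * baddr == 0x4000, len == 0x3000 gives index == 2 and
 * end == (0x7000 >> 13) == 3; len has bits below the page size set,
 * so end is bumped to 4 and entries 2 and 3 are written.  Note that
 * the rounding tests len itself rather than baddr+len.
 */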
void dvma_unmap_iommu(unsigned long baddr, int len)
{
        int index, end;

        index = baddr >> DVMA_PAGE_SHIFT;
        end = (DVMA_PAGE_ALIGN(baddr+len) >> DVMA_PAGE_SHIFT);

        for(; index < end ; index++) {
#ifdef DEBUG
                printk("freeing bus mapping %08x\n", index << DVMA_PAGE_SHIFT);
#endif
#if 0
                if(!dvma_entry_use(index))
                        printk("dvma_unmap freeing unused entry %04x\n",
                               index);
                else
                        dvma_entry_dec(index);
#endif
                dvma_entry_clr(index);
        }
}
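
/*
 * Rough lifecycle sketch (not part of this file; the caller of these
 * hooks is believed to be the common sun3 DVMA layer in
 * arch/m68k/sun3/sun3dvma.c, and the details there may differ):
 *
 *      kaddr = __get_free_pages(GFP_ATOMIC, get_order(len));
 *      baddr = <allocate a bus-address range>;
 *      dvma_map_iommu(kaddr, baddr, len);              device's view
 *      dvma_map_cpu(kaddr, dvma_btov(baddr), len);     cpu's view
 *      ... DMA ...
 *      dvma_unmap_iommu(baddr, len);
 *
 * dvma_btov() is assumed here to be the bus-to-virtual helper from
 * <asm/dvma.h>.
 */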