cobalt-omnitek.c 8.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341
  1. /*
  2. * Omnitek Scatter-Gather DMA Controller
  3. *
  4. * Copyright 2012-2015 Cisco Systems, Inc. and/or its affiliates.
  5. * All rights reserved.
  6. *
  7. * This program is free software; you may redistribute it and/or modify
  8. * it under the terms of the GNU General Public License as published by
  9. * the Free Software Foundation; version 2 of the License.
  10. *
  11. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  12. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  13. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  14. * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  15. * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  16. * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  17. * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  18. * SOFTWARE.
  19. */
  20. #include <linux/string.h>
  21. #include <linux/io.h>
  22. #include <linux/pci_regs.h>
  23. #include <linux/spinlock.h>
  24. #include "cobalt-driver.h"
  25. #include "cobalt-omnitek.h"
/* descriptor: bits in the low dword of the next-descriptor pointer */
#define END_OF_CHAIN (1 << 1)		/* stop after this descriptor */
#define INTERRUPT_ENABLE (1 << 2)	/* raise an interrupt when this descriptor completes */
#define WRITE_TO_PCI (1 << 3)		/* transfer direction: local -> PCI */
#define READ_FROM_PCI (0 << 3)		/* transfer direction: PCI -> local */
#define DESCRIPTOR_FLAG_MSK (END_OF_CHAIN | INTERRUPT_ENABLE | WRITE_TO_PCI)
/* next-descriptor addresses are 32-byte aligned; low 5 bits carry the flags above */
#define NEXT_ADRS_MSK 0xffffffe0

/* control/status register bits */
#define ENABLE (1 << 0)
#define START (1 << 1)
#define ABORT (1 << 2)
#define DONE (1 << 4)
#define SG_INTERRUPT (1 << 5)
#define EVENT_INTERRUPT (1 << 6)
#define SCATTER_GATHER_MODE (1 << 8)
#define DISABLE_VIDEO_RESYNC (1 << 9)
#define EVENT_INTERRUPT_ENABLE (1 << 10)
#define DIRECTIONAL_MSK (3 << 16)	/* channel direction field, see get_dma_direction() */
#define INPUT_ONLY (0 << 16)
#define OUTPUT_ONLY (1 << 16)
#define BIDIRECTIONAL (2 << 16)
#define DMA_TYPE_MEMORY (0 << 18)
#define DMA_TYPE_FIFO (1 << 18)

/* register map: capability block at BAR0, then one 0x40-byte block per channel */
#define BASE (cobalt->bar0)
#define CAPABILITY_HEADER (BASE)
#define CAPABILITY_REGISTER (BASE + 0x04)
#define PCI_64BIT (1 << 8)		/* capability: 64-bit PCIe addressing */
#define LOCAL_64BIT (1 << 9)		/* capability: 64-bit local addressing */
#define INTERRUPT_STATUS (BASE + 0x08)
#define PCI(c) (BASE + 0x40 + ((c) * 0x40))
#define SIZE(c) (BASE + 0x58 + ((c) * 0x40))
#define DESCRIPTOR(c) (BASE + 0x50 + ((c) * 0x40))
#define CS_REG(c) (BASE + 0x60 + ((c) * 0x40))
#define BYTES_TRANSFERRED(c) (BASE + 0x64 + ((c) * 0x40))
  60. static char *get_dma_direction(u32 status)
  61. {
  62. switch (status & DIRECTIONAL_MSK) {
  63. case INPUT_ONLY: return "Input";
  64. case OUTPUT_ONLY: return "Output";
  65. case BIDIRECTIONAL: return "Bidirectional";
  66. }
  67. return "";
  68. }
  69. static void show_dma_capability(struct cobalt *cobalt)
  70. {
  71. u32 header = ioread32(CAPABILITY_HEADER);
  72. u32 capa = ioread32(CAPABILITY_REGISTER);
  73. u32 i;
  74. cobalt_info("Omnitek DMA capability: ID 0x%02x Version 0x%02x Next 0x%x Size 0x%x\n",
  75. header & 0xff, (header >> 8) & 0xff,
  76. (header >> 16) & 0xffff, (capa >> 24) & 0xff);
  77. switch ((capa >> 8) & 0x3) {
  78. case 0:
  79. cobalt_info("Omnitek DMA: 32 bits PCIe and Local\n");
  80. break;
  81. case 1:
  82. cobalt_info("Omnitek DMA: 64 bits PCIe, 32 bits Local\n");
  83. break;
  84. case 3:
  85. cobalt_info("Omnitek DMA: 64 bits PCIe and Local\n");
  86. break;
  87. }
  88. for (i = 0; i < (capa & 0xf); i++) {
  89. u32 status = ioread32(CS_REG(i));
  90. cobalt_info("Omnitek DMA channel #%d: %s %s\n", i,
  91. status & DMA_TYPE_FIFO ? "FIFO" : "MEMORY",
  92. get_dma_direction(status));
  93. }
  94. }
/*
 * Kick off a scatter-gather DMA transfer on the stream's channel.
 *
 * Programs the bus address of the first descriptor, then enables and starts
 * the channel in scatter-gather mode. The write order matters: the high
 * dword of the descriptor address goes in first, then the (32-byte aligned)
 * low dword, and only then the control/status register that starts the
 * engine. Note 'cobalt' is used implicitly by the register macros (BASE).
 */
void omni_sg_dma_start(struct cobalt_stream *s, struct sg_dma_desc_info *desc)
{
	struct cobalt *cobalt = s->cobalt;

	iowrite32((u32)((u64)desc->bus >> 32), DESCRIPTOR(s->dma_channel) + 4);
	/* Low 5 bits of the descriptor pointer are flag bits, mask them off */
	iowrite32((u32)desc->bus & NEXT_ADRS_MSK, DESCRIPTOR(s->dma_channel));
	iowrite32(ENABLE | SCATTER_GATHER_MODE | START, CS_REG(s->dma_channel));
}
  102. bool is_dma_done(struct cobalt_stream *s)
  103. {
  104. struct cobalt *cobalt = s->cobalt;
  105. if (ioread32(CS_REG(s->dma_channel)) & DONE)
  106. return true;
  107. return false;
  108. }
  109. void omni_sg_dma_abort_channel(struct cobalt_stream *s)
  110. {
  111. struct cobalt *cobalt = s->cobalt;
  112. if (is_dma_done(s) == false)
  113. iowrite32(ABORT, CS_REG(s->dma_channel));
  114. }
  115. int omni_sg_dma_init(struct cobalt *cobalt)
  116. {
  117. u32 capa = ioread32(CAPABILITY_REGISTER);
  118. int i;
  119. cobalt->first_fifo_channel = 0;
  120. cobalt->dma_channels = capa & 0xf;
  121. if (capa & PCI_64BIT)
  122. cobalt->pci_32_bit = false;
  123. else
  124. cobalt->pci_32_bit = true;
  125. for (i = 0; i < cobalt->dma_channels; i++) {
  126. u32 status = ioread32(CS_REG(i));
  127. u32 ctrl = ioread32(CS_REG(i));
  128. if (!(ctrl & DONE))
  129. iowrite32(ABORT, CS_REG(i));
  130. if (!(status & DMA_TYPE_FIFO))
  131. cobalt->first_fifo_channel++;
  132. }
  133. show_dma_capability(cobalt);
  134. return 0;
  135. }
/*
 * Build the hardware scatter-gather descriptor chain for one transfer.
 *
 * @cobalt:       board state; used to reject >32-bit bus addresses when the
 *                DMA engine only supports 32-bit PCIe addressing
 * @scatter_list: DMA-mapped scatterlist (sg_dma_address/sg_dma_len are used)
 * @to_pci:       true for local-to-PCI (write) transfers
 * @sglen:        number of scatterlist entries
 * @size:         total bytes to transfer (must be a multiple of 4)
 * @width:        payload bytes copied per line
 * @stride:       bytes per line including padding; stride - width bytes of
 *                the scatterlist are skipped after each line (>= width)
 * @desc:         descriptor list memory (virt/bus) to fill in; on success
 *                desc->last_desc_virt points at the final descriptor
 *
 * The chain is terminated by looping the last descriptor back to the first
 * (with INTERRUPT_ENABLE set); callers re-point it via descriptor_list_chain()
 * or descriptor_list_end_of_chain(). Returns 0, or -EFAULT on a zero or
 * out-of-range bus address.
 */
int descriptor_list_create(struct cobalt *cobalt,
		struct scatterlist *scatter_list, bool to_pci, unsigned sglen,
		unsigned size, unsigned width, unsigned stride,
		struct sg_dma_desc_info *desc)
{
	struct sg_dma_descriptor *d = (struct sg_dma_descriptor *)desc->virt;
	dma_addr_t next = desc->bus;
	unsigned offset = 0;
	unsigned copy_bytes = width;
	unsigned copied = 0;
	bool first = true;

	/* Must be 4-byte aligned */
	WARN_ON(sg_dma_address(scatter_list) & 3);
	WARN_ON(size & 3);
	WARN_ON(next & 3);
	WARN_ON(stride & 3);
	WARN_ON(stride < width);
	/* No inter-line padding: treat the whole transfer as one line */
	if (width >= stride)
		copy_bytes = stride = size;
	while (size) {
		dma_addr_t addr = sg_dma_address(scatter_list) + offset;
		unsigned bytes;

		if (addr == 0)
			return -EFAULT;
		if (cobalt->pci_32_bit) {
			WARN_ON((u64)addr >> 32);
			if ((u64)addr >> 32)
				return -EFAULT;
		}
		/* PCIe address */
		d->pci_l = addr & 0xffffffff;
		/* If dma_addr_t is 32 bits, then addr >> 32 is actually the
		   equivalent of addr >> 0 in gcc. So must cast to u64. */
		d->pci_h = (u64)addr >> 32;
		/* Sync to start of streaming frame */
		d->local = 0;
		d->reserved0 = 0;
		/* Transfer bytes: bounded by both the remaining bytes in this
		 * scatterlist entry and the remaining bytes of the line */
		bytes = min(sg_dma_len(scatter_list) - offset,
				copy_bytes - copied);
		if (first) {
			/* 0x11111111 marks the frame start for write transfers
			 * -- TODO confirm against the Omnitek datasheet */
			if (to_pci)
				d->local = 0x11111111;
			first = false;
			if (sglen == 1) {
				/* Make sure there are always at least two
				 * descriptors */
				d->bytes = (bytes / 2) & ~3;
				d->reserved1 = 0;
				size -= d->bytes;
				copied += d->bytes;
				offset += d->bytes;
				addr += d->bytes;
				next += sizeof(struct sg_dma_descriptor);
				d->next_h = (u32)((u64)next >> 32);
				d->next_l = (u32)next |
					(to_pci ? WRITE_TO_PCI : 0);
				bytes -= d->bytes;
				d++;
				/* Second descriptor covers the remainder of
				 * the single entry. PCIe address: */
				d->pci_l = addr & 0xffffffff;
				/* If dma_addr_t is 32 bits, then addr >> 32
				 * is actually the equivalent of addr >> 0 in
				 * gcc. So must cast to u64. */
				d->pci_h = (u64)addr >> 32;
				/* Sync to start of streaming frame */
				d->local = 0;
				d->reserved0 = 0;
			}
		}
		d->bytes = bytes;
		d->reserved1 = 0;
		size -= bytes;
		copied += bytes;
		offset += bytes;
		if (copied == copy_bytes) {
			/* Line complete: skip the padding (stride - width)
			 * bytes, advancing through scatterlist entries as
			 * needed */
			while (copied < stride) {
				bytes = min(sg_dma_len(scatter_list) - offset,
						stride - copied);
				copied += bytes;
				offset += bytes;
				size -= bytes;
				if (sg_dma_len(scatter_list) == offset) {
					offset = 0;
					scatter_list = sg_next(scatter_list);
				}
			}
			copied = 0;
		} else {
			/* Scatterlist entry exhausted mid-line: continue the
			 * line in the next entry */
			offset = 0;
			scatter_list = sg_next(scatter_list);
		}
		/* Next descriptor + control bits */
		next += sizeof(struct sg_dma_descriptor);
		if (size == 0) {
			/* Loopback to the first descriptor */
			d->next_h = (u32)((u64)desc->bus >> 32);
			d->next_l = (u32)desc->bus |
				(to_pci ? WRITE_TO_PCI : 0) | INTERRUPT_ENABLE;
			/* 0x22222222 presumably marks frame end for read
			 * transfers -- verify against hardware docs */
			if (!to_pci)
				d->local = 0x22222222;
			desc->last_desc_virt = d;
		} else {
			d->next_h = (u32)((u64)next >> 32);
			d->next_l = (u32)next | (to_pci ? WRITE_TO_PCI : 0);
		}
		d++;
	}
	return 0;
}
  246. void descriptor_list_chain(struct sg_dma_desc_info *this,
  247. struct sg_dma_desc_info *next)
  248. {
  249. struct sg_dma_descriptor *d = this->last_desc_virt;
  250. u32 direction = d->next_l & WRITE_TO_PCI;
  251. if (next == NULL) {
  252. d->next_h = 0;
  253. d->next_l = direction | INTERRUPT_ENABLE | END_OF_CHAIN;
  254. } else {
  255. d->next_h = (u32)((u64)next->bus >> 32);
  256. d->next_l = (u32)next->bus | direction | INTERRUPT_ENABLE;
  257. }
  258. }
  259. void *descriptor_list_allocate(struct sg_dma_desc_info *desc, size_t bytes)
  260. {
  261. desc->size = bytes;
  262. desc->virt = dma_alloc_coherent(desc->dev, bytes,
  263. &desc->bus, GFP_KERNEL);
  264. return desc->virt;
  265. }
  266. void descriptor_list_free(struct sg_dma_desc_info *desc)
  267. {
  268. if (desc->virt)
  269. dma_free_coherent(desc->dev, desc->size,
  270. desc->virt, desc->bus);
  271. desc->virt = NULL;
  272. }
  273. void descriptor_list_interrupt_enable(struct sg_dma_desc_info *desc)
  274. {
  275. struct sg_dma_descriptor *d = desc->last_desc_virt;
  276. d->next_l |= INTERRUPT_ENABLE;
  277. }
  278. void descriptor_list_interrupt_disable(struct sg_dma_desc_info *desc)
  279. {
  280. struct sg_dma_descriptor *d = desc->last_desc_virt;
  281. d->next_l &= ~INTERRUPT_ENABLE;
  282. }
  283. void descriptor_list_loopback(struct sg_dma_desc_info *desc)
  284. {
  285. struct sg_dma_descriptor *d = desc->last_desc_virt;
  286. d->next_h = (u32)((u64)desc->bus >> 32);
  287. d->next_l = (u32)desc->bus | (d->next_l & DESCRIPTOR_FLAG_MSK);
  288. }
  289. void descriptor_list_end_of_chain(struct sg_dma_desc_info *desc)
  290. {
  291. struct sg_dma_descriptor *d = desc->last_desc_virt;
  292. d->next_l |= END_OF_CHAIN;
  293. }