dma_queue.c

/*
 * Copyright 2012 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#include <linux/io.h>
#include <linux/atomic.h>
#include <linux/module.h>

#include <gxio/dma_queue.h>
/* Wait for a memory read to complete. */
#define wait_for_value(val)				\
	__asm__ __volatile__("move %0, %0" :: "r"(val))
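/*
 * Note (added for illustration): the dummy "move" consumes @val as a
 * register input, so the asm cannot issue until the load that produced
 * @val has actually returned its data; the "volatile" also keeps the
 * compiler from reordering around it.
 */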
/* The index is in the low 16. */
#define DMA_QUEUE_INDEX_MASK ((1 << 16) - 1)
/*
 * The hardware descriptor-ring type.
 * This matches the types used by mpipe (MPIPE_EDMA_POST_REGION_VAL_t)
 * and trio (TRIO_PUSH_DMA_REGION_VAL_t or TRIO_PULL_DMA_REGION_VAL_t).
 * See those types for more documentation on the individual fields.
 */
typedef union {
	struct {
#ifndef __BIG_ENDIAN__
		uint64_t ring_idx:16;
		uint64_t count:16;
		uint64_t gen:1;
		uint64_t __reserved:31;
#else
		uint64_t __reserved:31;
		uint64_t gen:1;
		uint64_t count:16;
		uint64_t ring_idx:16;
#endif
	};
	uint64_t word;
} __gxio_ring_t;
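
/*
 * Illustrative sketch (not part of the original driver): the union
 * above lets a raw 64-bit post-region value be picked apart without
 * manual shifting and masking.  With a made-up little-endian value:
 *
 *	__gxio_ring_t val;
 *	val.word = 0x10002000aULL;
 *	// now val.ring_idx == 10, val.count == 2, val.gen == 1
 */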
void __gxio_dma_queue_init(__gxio_dma_queue_t *dma_queue,
			   void *post_region_addr, unsigned int num_entries)
{
	/*
	 * Limit 65536 entry rings to 65535 credits because we only have a
	 * 16 bit completion counter.
	 */
	int64_t credits = (num_entries < 65536) ? num_entries : 65535;

	memset(dma_queue, 0, sizeof(*dma_queue));

	dma_queue->post_region_addr = post_region_addr;
	dma_queue->hw_complete_count = 0;
	dma_queue->credits_and_next_index = credits << DMA_QUEUE_CREDIT_SHIFT;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_init);
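
/*
 * Illustrative usage (a sketch, not from this file): a typical caller
 * maps the hardware's post region and initializes the queue once at
 * setup time.  "post_region" and "ring_entries" are hypothetical names
 * for this example.
 *
 *	__gxio_dma_queue_t queue;
 *	void *post_region = ...;	(MMIO mapping of the post region)
 *	unsigned int ring_entries = 512;
 *
 *	__gxio_dma_queue_init(&queue, post_region, ring_entries);
 *	(queue now holds 512 credits, i.e. 512 free descriptor slots)
 */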
void __gxio_dma_queue_update_credits(__gxio_dma_queue_t *dma_queue)
{
	__gxio_ring_t val;
	uint64_t count;
	uint64_t delta;
	uint64_t new_count;

	/*
	 * Read the 64-bit completion count without touching the cache, so
	 * we later avoid having to evict any sharers of this cache line
	 * when we update it below.
	 */
	uint64_t orig_hw_complete_count =
		cmpxchg(&dma_queue->hw_complete_count,
			-1, -1);

	/* Make sure the load completes before we access the hardware. */
	wait_for_value(orig_hw_complete_count);

	/* Read the 16-bit count of how many packets it has completed. */
	val.word = __gxio_mmio_read(dma_queue->post_region_addr);
	count = val.count;

	/*
	 * Calculate the number of completions since we last updated the
	 * 64-bit counter.  It's safe to ignore the high bits because the
	 * maximum credit value is 65535.
	 */
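	/*
	 * Worked example (added for illustration): if the low 16 bits of
	 * the 64-bit counter were 0xfffe and the hardware now reports
	 * count == 0x0005, then
	 *
	 *	delta = (0x0005 - 0xfffe) & 0xffff = 7
	 *
	 * i.e. the masking makes the subtraction immune to the 16-bit
	 * hardware counter wrapping around.
	 */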
	delta = (count - orig_hw_complete_count) & 0xffff;
	if (delta == 0)
		return;

	/*
	 * Try to write back the count, advanced by delta.  If we race with
	 * another thread, this might fail, in which case we return
	 * immediately on the assumption that some credits are (or at least
	 * were) available.
	 */
	new_count = orig_hw_complete_count + delta;
	if (cmpxchg(&dma_queue->hw_complete_count,
		    orig_hw_complete_count,
		    new_count) != orig_hw_complete_count)
		return;

	/*
	 * We succeeded in advancing the completion count; add back the
	 * corresponding number of egress credits.
	 */
	__insn_fetchadd(&dma_queue->credits_and_next_index,
			(delta << DMA_QUEUE_CREDIT_SHIFT));
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_update_credits);
/*
 * A separate 'blocked' method for put() so that backtraces and
 * profiles will clearly indicate that we're wasting time spinning on
 * egress availability rather than actually posting commands.
 */
int64_t __gxio_dma_queue_wait_for_credits(__gxio_dma_queue_t *dma_queue,
					  int64_t modifier)
{
	int backoff = 16;
	int64_t old;

	do {
		int i;
		/* Back off to avoid spamming memory networks. */
		for (i = backoff; i > 0; i--)
			__insn_mfspr(SPR_PASS);

		/* Check credits again. */
		__gxio_dma_queue_update_credits(dma_queue);
		old = __insn_fetchaddgez(&dma_queue->credits_and_next_index,
					 modifier);

		/* Calculate bounded exponential backoff for next iteration. */
		if (backoff < 256)
			backoff *= 2;
	} while (old + modifier < 0);

	return old;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_wait_for_credits);
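
/*
 * Note (added for illustration): __insn_fetchaddgez only performs the
 * add if the result would be >= 0, so a failed credit grab leaves the
 * shared word untouched and the loop retries.  The spin delay grows
 * 16, 32, 64, 128, 256 passes per iteration and then stays capped at
 * 256 until enough credits appear.
 */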
int64_t __gxio_dma_queue_reserve_aux(__gxio_dma_queue_t *dma_queue,
				     unsigned int num, int wait)
{
	return __gxio_dma_queue_reserve(dma_queue, num, wait != 0, true);
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_reserve_aux);
int __gxio_dma_queue_is_complete(__gxio_dma_queue_t *dma_queue,
				 int64_t completion_slot, int update)
{
	if (update) {
		if (ACCESS_ONCE(dma_queue->hw_complete_count) >
		    completion_slot)
			return 1;

		__gxio_dma_queue_update_credits(dma_queue);
	}

	return ACCESS_ONCE(dma_queue->hw_complete_count) > completion_slot;
}

EXPORT_SYMBOL_GPL(__gxio_dma_queue_is_complete);
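
/*
 * Illustrative usage (a sketch, not from this file): a caller that
 * posted a command at completion slot "slot" can poll for it to
 * finish; "slot" and "queue" are hypothetical names for this example.
 *
 *	while (!__gxio_dma_queue_is_complete(&queue, slot, 1))
 *		cpu_relax();
 *
 * Passing update != 0 lets the poll refresh hw_complete_count from the
 * hardware instead of waiting for another thread to do so.
 */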