/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ASYNC_TX_H_
#define _ASYNC_TX_H_
#include <linux/dmaengine.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>

/*
 * On architectures without dma-mapping capabilities we need to ensure
 * that the asynchronous path compiles away: forcing inlining lets the
 * compiler see the constant-NULL channel and eliminate the dma branches.
 */
#ifdef CONFIG_HAS_DMA
#define __async_inline
#else
#define __async_inline __always_inline
#endif
/**
 * struct dma_chan_ref - object used to manage dma channels received from the
 *	dmaengine core
 * @chan: the channel being tracked
 * @node: node for the channel to be placed on async_tx_master_list
 * @rcu: for list_del_rcu
 * @count: number of times this channel is listed in the pool
 *	(for channels with multiple capabilities)
 */
struct dma_chan_ref {
	struct dma_chan *chan;
	struct list_head node;
	struct rcu_head rcu;
	atomic_t count;
};
/**
 * enum async_tx_flags - modifiers for the async_* calls
 * @ASYNC_TX_XOR_ZERO_DST: this flag must be used for xor operations where the
 *	destination address is not a source.  The asynchronous case handles
 *	this implicitly, the synchronous case needs to zero the destination
 *	block.
 * @ASYNC_TX_XOR_DROP_DST: this flag must be used if the destination address
 *	is also one of the source addresses.  In the synchronous case the
 *	destination address is an implied source, whereas in the asynchronous
 *	case it must be listed as a source.  The destination address must be
 *	the first address in the source array.
 * @ASYNC_TX_ACK: immediately ack the descriptor, precludes setting up a
 *	dependency chain
 * @ASYNC_TX_FENCE: specify that the next operation in the dependency
 *	chain uses this operation's result as an input
 * @ASYNC_TX_PQ_XOR_DST: do not overwrite the syndrome but XOR it with the
 *	input data.  Required for the rmw case.
 */
enum async_tx_flags {
	ASYNC_TX_XOR_ZERO_DST = (1 << 0),
	ASYNC_TX_XOR_DROP_DST = (1 << 1),
	ASYNC_TX_ACK = (1 << 2),
	ASYNC_TX_FENCE = (1 << 3),
	ASYNC_TX_PQ_XOR_DST = (1 << 4),
};
/**
 * struct async_submit_ctl - async_tx submission/completion modifiers
 * @flags: submission modifiers (enum async_tx_flags)
 * @depend_tx: parent dependency of the current operation being submitted
 * @cb_fn: callback routine to run at operation completion
 * @cb_param: parameter for the callback routine
 * @scribble: caller provided space for dma/page address conversions
 *	(an addr_conv_t array; stored untyped here)
 */
struct async_submit_ctl {
	enum async_tx_flags flags;
	struct dma_async_tx_descriptor *depend_tx;
	dma_async_tx_callback cb_fn;
	void *cb_param;
	void *scribble;
};
#ifdef CONFIG_DMA_ENGINE
/* flush pending descriptors to hardware on every registered channel */
#define async_tx_issue_pending_all dma_issue_pending_all
  87. /**
  88. * async_tx_issue_pending - send pending descriptor to the hardware channel
  89. * @tx: descriptor handle to retrieve hardware context
  90. *
  91. * Note: any dependent operations will have already been issued by
  92. * async_tx_channel_switch, or (in the case of no channel switch) will
  93. * be already pending on this channel.
  94. */
  95. static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
  96. {
  97. if (likely(tx)) {
  98. struct dma_chan *chan = tx->chan;
  99. struct dma_device *dma = chan->device;
  100. dma->device_issue_pending(chan);
  101. }
  102. }
#ifdef CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL
#include <asm/async_tx.h>
#else
/*
 * Generic channel selection: only the dependency and the transaction type
 * are consulted; the dst/src/len arguments are accepted for signature
 * compatibility with arch-specific implementations and ignored here.
 */
#define async_tx_find_channel(dep, type, dst, dst_count, src, src_count, len) \
	 __async_tx_find_channel(dep, type)
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type);
#endif /* CONFIG_ARCH_HAS_ASYNC_TX_FIND_CHANNEL */
  112. #else
  113. static inline void async_tx_issue_pending_all(void)
  114. {
  115. do { } while (0);
  116. }
  117. static inline void async_tx_issue_pending(struct dma_async_tx_descriptor *tx)
  118. {
  119. do { } while (0);
  120. }
  121. static inline struct dma_chan *
  122. async_tx_find_channel(struct async_submit_ctl *submit,
  123. enum dma_transaction_type tx_type, struct page **dst,
  124. int dst_count, struct page **src, int src_count,
  125. size_t len)
  126. {
  127. return NULL;
  128. }
  129. #endif
/**
 * async_tx_sync_epilog - actions to take if an operation is run synchronously
 * @submit: submission modifiers; if a completion callback (cb_fn) was
 *	supplied it is invoked directly with cb_param
 *
 * (The old kernel-doc named @cb_fn/@cb_fn_param, which do not match the
 * actual parameter list.)
 */
static inline void
async_tx_sync_epilog(struct async_submit_ctl *submit)
{
	if (submit->cb_fn)
		submit->cb_fn(submit->cb_param);
}
/*
 * addr_conv_t - one element of the caller-provided 'scribble' space used
 * for dma/page address conversions; sized to hold whichever representation
 * an operation needs.
 */
typedef union {
	unsigned long addr;
	struct page *page;
	dma_addr_t dma;
} addr_conv_t;
  146. static inline void
  147. init_async_submit(struct async_submit_ctl *args, enum async_tx_flags flags,
  148. struct dma_async_tx_descriptor *tx,
  149. dma_async_tx_callback cb_fn, void *cb_param,
  150. addr_conv_t *scribble)
  151. {
  152. args->flags = flags;
  153. args->depend_tx = tx;
  154. args->cb_fn = cb_fn;
  155. args->cb_param = cb_param;
  156. args->scribble = scribble;
  157. }
/* attach @tx to @chan honoring the dependency/flags/callback in @submit */
void async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
	struct async_submit_ctl *submit);

/* xor @src_cnt source pages into @dest */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, struct async_submit_ctl *submit);

/* validate that the sources xor to @dest; status returned via @result */
struct dma_async_tx_descriptor *
async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, enum sum_check_flags *result,
	struct async_submit_ctl *submit);

/* copy @len bytes from @src+@src_offset to @dest+@dest_offset */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	unsigned int src_offset, size_t len,
	struct async_submit_ctl *submit);

/* schedule a callback-only descriptor (no data movement) */
struct dma_async_tx_descriptor *async_trigger_callback(struct async_submit_ctl *submit);

/* generate the raid6 P/Q syndrome over @blocks */
struct dma_async_tx_descriptor *
async_gen_syndrome(struct page **blocks, unsigned int offset, int src_cnt,
	size_t len, struct async_submit_ctl *submit);

/* validate the raid6 P/Q syndrome; status returned via @pqres */
struct dma_async_tx_descriptor *
async_syndrome_val(struct page **blocks, unsigned int offset, int src_cnt,
	size_t len, enum sum_check_flags *pqres, struct page *spare,
	struct async_submit_ctl *submit);

/* raid6 recovery of the two failed blocks at indices @faila and @failb */
struct dma_async_tx_descriptor *
async_raid6_2data_recov(int src_num, size_t bytes, int faila, int failb,
	struct page **ptrs, struct async_submit_ctl *submit);

/* raid6 recovery of one data block (@faila) plus the P block */
struct dma_async_tx_descriptor *
async_raid6_datap_recov(int src_num, size_t bytes, int faila,
	struct page **ptrs, struct async_submit_ctl *submit);

/*
 * NOTE(review): presumably waits for *tx (and its dependency chain) to
 * complete and acks it — confirm against crypto/async_tx/async_tx.c.
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx);
#endif /* _ASYNC_TX_H_ */