/* dma-contiguous.h */
  1. #ifndef __LINUX_CMA_H
  2. #define __LINUX_CMA_H
/*
 * Contiguous Memory Allocator for DMA mapping framework
 * Copyright (c) 2010-2011 by Samsung Electronics.
 * Written by:
 *	Marek Szyprowski <m.szyprowski@samsung.com>
 *	Michal Nazarewicz <mina86@mina86.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License or (at your option) any later version of the license.
 */
/*
 * Contiguous Memory Allocator
 *
 * The Contiguous Memory Allocator (CMA) makes it possible to
 * allocate big contiguous chunks of memory after the system has
 * booted.
 *
 * Why is it needed?
 *
 * Various devices on embedded systems have no scatter-gather and/or
 * IO map support and require contiguous blocks of memory to
 * operate.  They include devices such as cameras, hardware video
 * coders, etc.
 *
 * Such devices often require big memory buffers (a full HD frame
 * is, for instance, more than 2 mega pixels large, i.e. more than 6
 * MB of memory), which makes mechanisms such as kmalloc() or
 * alloc_page() ineffective.
 *
 * At the same time, a solution where a big memory region is
 * reserved for a device is suboptimal since often more memory is
 * reserved than strictly required and, moreover, the memory is
 * inaccessible to the page allocator even if the device drivers
 * don't use it.
 *
 * CMA tries to solve this issue by operating on memory regions
 * where only movable pages can be allocated from.  This way, the
 * kernel can use the memory for pagecache and when a device driver
 * requests it, allocated pages can be migrated.
 *
 * Driver usage
 *
 * CMA should not be used by device drivers directly.  It is
 * only a helper framework for the dma-mapping subsystem.
 *
 * For more information, see kernel-docs in drivers/base/dma-contiguous.c
 */
  51. #ifdef __KERNEL__
  52. #include <linux/device.h>
  53. struct cma;
  54. struct page;
  55. #ifdef CONFIG_DMA_CMA
  56. extern struct cma *dma_contiguous_default_area;
  57. static inline struct cma *dev_get_cma_area(struct device *dev)
  58. {
  59. if (dev && dev->cma_area)
  60. return dev->cma_area;
  61. return dma_contiguous_default_area;
  62. }
  63. static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
  64. {
  65. if (dev)
  66. dev->cma_area = cma;
  67. }
  68. static inline void dma_contiguous_set_default(struct cma *cma)
  69. {
  70. dma_contiguous_default_area = cma;
  71. }
  72. void dma_contiguous_reserve(phys_addr_t addr_limit);
  73. int __init dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
  74. phys_addr_t limit, struct cma **res_cma,
  75. bool fixed);
  76. /**
  77. * dma_declare_contiguous() - reserve area for contiguous memory handling
  78. * for particular device
  79. * @dev: Pointer to device structure.
  80. * @size: Size of the reserved memory.
  81. * @base: Start address of the reserved memory (optional, 0 for any).
  82. * @limit: End address of the reserved memory (optional, 0 for any).
  83. *
  84. * This function reserves memory for specified device. It should be
  85. * called by board specific code when early allocator (memblock or bootmem)
  86. * is still activate.
  87. */
  88. static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
  89. phys_addr_t base, phys_addr_t limit)
  90. {
  91. struct cma *cma;
  92. int ret;
  93. ret = dma_contiguous_reserve_area(size, base, limit, &cma, true);
  94. if (ret == 0)
  95. dev_set_cma_area(dev, cma);
  96. return ret;
  97. }
  98. struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
  99. unsigned int order);
  100. bool dma_release_from_contiguous(struct device *dev, struct page *pages,
  101. int count);
  102. #else
  103. static inline struct cma *dev_get_cma_area(struct device *dev)
  104. {
  105. return NULL;
  106. }
  107. static inline void dev_set_cma_area(struct device *dev, struct cma *cma) { }
  108. static inline void dma_contiguous_set_default(struct cma *cma) { }
  109. static inline void dma_contiguous_reserve(phys_addr_t limit) { }
  110. static inline int dma_contiguous_reserve_area(phys_addr_t size, phys_addr_t base,
  111. phys_addr_t limit, struct cma **res_cma,
  112. bool fixed)
  113. {
  114. return -ENOSYS;
  115. }
  116. static inline
  117. int dma_declare_contiguous(struct device *dev, phys_addr_t size,
  118. phys_addr_t base, phys_addr_t limit)
  119. {
  120. return -ENOSYS;
  121. }
  122. static inline
  123. struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
  124. unsigned int order)
  125. {
  126. return NULL;
  127. }
  128. static inline
  129. bool dma_release_from_contiguous(struct device *dev, struct page *pages,
  130. int count)
  131. {
  132. return false;
  133. }
  134. #endif
  135. #endif
  136. #endif