- /*
- * SWIOTLB-based DMA API implementation
- *
- * Copyright (C) 2012 ARM Ltd.
- * Author: Catalin Marinas <catalin.marinas@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
- #include <linux/gfp.h>
- #include <linux/acpi.h>
- #include <linux/export.h>
- #include <linux/slab.h>
- #include <linux/genalloc.h>
- #include <linux/dma-mapping.h>
- #include <linux/dma-contiguous.h>
- #include <linux/vmalloc.h>
- #include <linux/swiotlb.h>
- #include <asm/cacheflush.h>
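- /*
-  * Non-coherent devices (and callers asking for DMA_ATTR_WRITE_COMBINE)
-  * get a Normal non-cacheable page protection; fully coherent devices
-  * keep the cacheable protection they were given.
-  */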
- static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
- bool coherent)
- {
- if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
- return pgprot_writecombine(prot);
- return prot;
- }
- static struct gen_pool *atomic_pool;
- #define DEFAULT_DMA_COHERENT_POOL_SIZE SZ_256K
- static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;
- static int __init early_coherent_pool(char *p)
- {
- atomic_pool_size = memparse(p, &p);
- return 0;
- }
- early_param("coherent_pool", early_coherent_pool);
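- /*
-  * The atomic pool is a boot-time, non-cacheable remapping reserved for
-  * allocations that cannot sleep (and therefore cannot remap pages). Its
-  * default 256 KiB size can be overridden on the command line, e.g.
-  * "coherent_pool=1M" (illustrative value; memparse() accepts the usual
-  * K/M/G suffixes).
-  */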
- static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
- {
- unsigned long val;
- void *ptr = NULL;
- if (!atomic_pool) {
- WARN(1, "coherent pool not initialised!\n");
- return NULL;
- }
- val = gen_pool_alloc(atomic_pool, size);
- if (val) {
- phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);
- *ret_page = phys_to_page(phys);
- ptr = (void *)val;
- memset(ptr, 0, size);
- }
- return ptr;
- }
- static bool __in_atomic_pool(void *start, size_t size)
- {
- return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
- }
- static int __free_from_pool(void *start, size_t size)
- {
- if (!__in_atomic_pool(start, size))
- return 0;
- gen_pool_free(atomic_pool, (unsigned long)start, size);
- return 1;
- }
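- /*
-  * Allocate the backing memory for a coherent buffer: prefer the device's
-  * CMA area when one exists and the caller may block, otherwise fall back
-  * to the generic swiotlb allocator.
-  */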
- static void *__dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
- {
- if (dev == NULL) {
- WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
- return NULL;
- }
- if (IS_ENABLED(CONFIG_ZONE_DMA) &&
- dev->coherent_dma_mask <= DMA_BIT_MASK(32))
- flags |= GFP_DMA;
- if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
- struct page *page;
- void *addr;
- page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
- get_order(size));
- if (!page)
- return NULL;
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- addr = page_address(page);
- memset(addr, 0, size);
- return addr;
- } else {
- return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
- }
- }
- static void __dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
- {
- bool freed;
- phys_addr_t paddr = dma_to_phys(dev, dma_handle);
- if (dev == NULL) {
- WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
- return;
- }
- freed = dma_release_from_contiguous(dev,
- phys_to_page(paddr),
- size >> PAGE_SHIFT);
- if (!freed)
- swiotlb_free_coherent(dev, size, vaddr, dma_handle);
- }
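- /*
-  * Top-level allocator: non-blocking requests from non-coherent devices are
-  * served from the atomic pool; everything else goes through
-  * __dma_alloc_coherent() and, for non-coherent devices, is remapped
-  * non-cacheable after the kernel linear alias has been flushed.
-  */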
- static void *__dma_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
- {
- struct page *page;
- void *ptr, *coherent_ptr;
- bool coherent = is_device_dma_coherent(dev);
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);
- size = PAGE_ALIGN(size);
- if (!coherent && !gfpflags_allow_blocking(flags)) {
- struct page *page = NULL;
- void *addr = __alloc_from_pool(size, &page, flags);
- if (addr)
- *dma_handle = phys_to_dma(dev, page_to_phys(page));
- return addr;
- }
- ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
- if (!ptr)
- goto no_mem;
- /* no need for non-cacheable mapping if coherent */
- if (coherent)
- return ptr;
- /* remove any dirty cache lines on the kernel alias */
- __dma_flush_range(ptr, ptr + size);
- /* create a coherent mapping */
- page = virt_to_page(ptr);
- coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
- prot, NULL);
- if (!coherent_ptr)
- goto no_map;
- return coherent_ptr;
- no_map:
- __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
- no_mem:
- *dma_handle = DMA_ERROR_CODE;
- return NULL;
- }
- static void __dma_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
- {
- void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));
- size = PAGE_ALIGN(size);
- if (!is_device_dma_coherent(dev)) {
- if (__free_from_pool(vaddr, size))
- return;
- vunmap(vaddr);
- }
- __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
- }
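- /*
-  * Streaming DMA ops: wrap the generic swiotlb implementation, adding the
-  * cache maintenance (__dma_map_area()/__dma_unmap_area()) that
-  * non-coherent devices require around each transfer.
-  */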
- static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- dma_addr_t dev_addr;
- dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
- if (!is_device_dma_coherent(dev))
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- return dev_addr;
- }
- static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- if (!is_device_dma_coherent(dev))
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
- }
- static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- struct scatterlist *sg;
- int i, ret;
- ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
- if (!is_device_dma_coherent(dev))
- for_each_sg(sgl, sg, ret, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- return ret;
- }
- static void __swiotlb_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- struct scatterlist *sg;
- int i;
- if (!is_device_dma_coherent(dev))
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
- }
- static void __swiotlb_sync_single_for_cpu(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
- {
- if (!is_device_dma_coherent(dev))
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
- }
- static void __swiotlb_sync_single_for_device(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
- {
- swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
- if (!is_device_dma_coherent(dev))
- __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
- }
- static void __swiotlb_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
- {
- struct scatterlist *sg;
- int i;
- if (!is_device_dma_coherent(dev))
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
- }
- static void __swiotlb_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
- {
- struct scatterlist *sg;
- int i;
- swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
- if (!is_device_dma_coherent(dev))
- for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
- sg->length, dir);
- }
- static int __swiotlb_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
- {
- int ret = -ENXIO;
- unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
- PAGE_SHIFT;
- unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
- unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
- unsigned long off = vma->vm_pgoff;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
- if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
- ret = remap_pfn_range(vma, vma->vm_start,
- pfn + off,
- vma->vm_end - vma->vm_start,
- vma->vm_page_prot);
- }
- return ret;
- }
- static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t handle, size_t size,
- struct dma_attrs *attrs)
- {
- int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);
- if (!ret)
- sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
- PAGE_ALIGN(size), 0);
- return ret;
- }
- static struct dma_map_ops swiotlb_dma_ops = {
- .alloc = __dma_alloc,
- .free = __dma_free,
- .mmap = __swiotlb_mmap,
- .get_sgtable = __swiotlb_get_sgtable,
- .map_page = __swiotlb_map_page,
- .unmap_page = __swiotlb_unmap_page,
- .map_sg = __swiotlb_map_sg_attrs,
- .unmap_sg = __swiotlb_unmap_sg_attrs,
- .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
- .sync_single_for_device = __swiotlb_sync_single_for_device,
- .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
- .sync_sg_for_device = __swiotlb_sync_sg_for_device,
- .dma_supported = swiotlb_dma_supported,
- .mapping_error = swiotlb_dma_mapping_error,
- };
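- /*
-  * Set up the atomic pool at boot: allocate the pages (from CMA when
-  * available), zero and flush them, remap them non-cacheable, and hand the
-  * region to a gen_pool so __alloc_from_pool() can serve atomic callers.
-  */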
- static int __init atomic_pool_init(void)
- {
- pgprot_t prot = __pgprot(PROT_NORMAL_NC);
- unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
- struct page *page;
- void *addr;
- unsigned int pool_size_order = get_order(atomic_pool_size);
- if (dev_get_cma_area(NULL))
- page = dma_alloc_from_contiguous(NULL, nr_pages,
- pool_size_order);
- else
- page = alloc_pages(GFP_DMA, pool_size_order);
- if (page) {
- int ret;
- void *page_addr = page_address(page);
- memset(page_addr, 0, atomic_pool_size);
- __dma_flush_range(page_addr, page_addr + atomic_pool_size);
- atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
- if (!atomic_pool)
- goto free_page;
- addr = dma_common_contiguous_remap(page, atomic_pool_size,
- VM_USERMAP, prot, atomic_pool_init);
- if (!addr)
- goto destroy_genpool;
- ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
- page_to_phys(page),
- atomic_pool_size, -1);
- if (ret)
- goto remove_mapping;
- gen_pool_set_algo(atomic_pool,
- gen_pool_first_fit_order_align,
- (void *)PAGE_SHIFT);
- pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
- atomic_pool_size / 1024);
- return 0;
- }
- goto out;
- remove_mapping:
- dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
- destroy_genpool:
- gen_pool_destroy(atomic_pool);
- atomic_pool = NULL;
- free_page:
- if (!dma_release_from_contiguous(NULL, page, nr_pages))
- __free_pages(page, pool_size_order);
- out:
- pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
- atomic_pool_size / 1024);
- return -ENOMEM;
- }
- /********************************************
- * The following APIs are for dummy DMA ops *
- ********************************************/
- static void *__dummy_alloc(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t flags,
- struct dma_attrs *attrs)
- {
- return NULL;
- }
- static void __dummy_free(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle,
- struct dma_attrs *attrs)
- {
- }
- static int __dummy_mmap(struct device *dev,
- struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
- {
- return -ENXIO;
- }
- static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- return DMA_ERROR_CODE;
- }
- static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- }
- static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- return 0;
- }
- static void __dummy_unmap_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- }
- static void __dummy_sync_single(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
- {
- }
- static void __dummy_sync_sg(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
- {
- }
- static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
- {
- return 1;
- }
- static int __dummy_dma_supported(struct device *hwdev, u64 mask)
- {
- return 0;
- }
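- /*
-  * Installed for devices that are not expected to perform DMA: every
-  * operation either fails or is a no-op, so a driver attempting DMA through
-  * these ops gets an immediate error rather than silent corruption.
-  */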
- struct dma_map_ops dummy_dma_ops = {
- .alloc = __dummy_alloc,
- .free = __dummy_free,
- .mmap = __dummy_mmap,
- .map_page = __dummy_map_page,
- .unmap_page = __dummy_unmap_page,
- .map_sg = __dummy_map_sg,
- .unmap_sg = __dummy_unmap_sg,
- .sync_single_for_cpu = __dummy_sync_single,
- .sync_single_for_device = __dummy_sync_single,
- .sync_sg_for_cpu = __dummy_sync_sg,
- .sync_sg_for_device = __dummy_sync_sg,
- .mapping_error = __dummy_mapping_error,
- .dma_supported = __dummy_dma_supported,
- };
- EXPORT_SYMBOL(dummy_dma_ops);
- static int __init arm64_dma_init(void)
- {
- return atomic_pool_init();
- }
- arch_initcall(arm64_dma_init);
- #define PREALLOC_DMA_DEBUG_ENTRIES 4096
- static int __init dma_debug_do_init(void)
- {
- dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
- return 0;
- }
- fs_initcall(dma_debug_do_init);
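- /*
-  * With CONFIG_IOMMU_DMA, devices behind an IOMMU get a second set of
-  * dma_map_ops that routes everything through the generic iommu-dma layer,
-  * applying the same non-coherent cache maintenance policy as the swiotlb
-  * path above.
-  */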
- #ifdef CONFIG_IOMMU_DMA
- #include <linux/dma-iommu.h>
- #include <linux/platform_device.h>
- #include <linux/amba/bus.h>
- /* Thankfully, all cache ops are by VA so we can ignore phys here */
- static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
- {
- __dma_flush_range(virt, virt + PAGE_SIZE);
- }
- static void *__iommu_alloc_attrs(struct device *dev, size_t size,
- dma_addr_t *handle, gfp_t gfp,
- struct dma_attrs *attrs)
- {
- bool coherent = is_device_dma_coherent(dev);
- int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
- size_t iosize = size;
- void *addr;
- if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
- return NULL;
- size = PAGE_ALIGN(size);
- /*
- * Some drivers rely on this, and we probably don't want the
- * possibility of stale kernel data being read by devices anyway.
- */
- gfp |= __GFP_ZERO;
- if (gfpflags_allow_blocking(gfp)) {
- struct page **pages;
- pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
- pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
- flush_page);
- if (!pages)
- return NULL;
- addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
- __builtin_return_address(0));
- if (!addr)
- iommu_dma_free(dev, pages, iosize, handle);
- } else {
- struct page *page;
- /*
- * In atomic context we can't remap anything, so we'll only
- * get the virtually contiguous buffer we need by way of a
- * physically contiguous allocation.
- */
- if (coherent) {
- page = alloc_pages(gfp, get_order(size));
- addr = page ? page_address(page) : NULL;
- } else {
- addr = __alloc_from_pool(size, &page, gfp);
- }
- if (!addr)
- return NULL;
- *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
- if (iommu_dma_mapping_error(dev, *handle)) {
- if (coherent)
- __free_pages(page, get_order(size));
- else
- __free_from_pool(addr, size);
- addr = NULL;
- }
- }
- return addr;
- }
- static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
- dma_addr_t handle, struct dma_attrs *attrs)
- {
- size_t iosize = size;
- size = PAGE_ALIGN(size);
- /*
- * @cpu_addr will be one of 3 things depending on how it was allocated:
- * - A remapped array of pages from iommu_dma_alloc(), for all
- * non-atomic allocations.
- * - A non-cacheable alias from the atomic pool, for atomic
- * allocations by non-coherent devices.
- * - A normal lowmem address, for atomic allocations by
- * coherent devices.
- * Hence how dodgy the below logic looks...
- */
- if (__in_atomic_pool(cpu_addr, size)) {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
- __free_from_pool(cpu_addr, size);
- } else if (is_vmalloc_addr(cpu_addr)) {
- struct vm_struct *area = find_vm_area(cpu_addr);
- if (WARN_ON(!area || !area->pages))
- return;
- iommu_dma_free(dev, area->pages, iosize, &handle);
- dma_common_free_remap(cpu_addr, size, VM_USERMAP);
- } else {
- iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
- __free_pages(virt_to_page(cpu_addr), get_order(size));
- }
- }
- static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
- void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
- {
- struct vm_struct *area;
- int ret;
- vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
- is_device_dma_coherent(dev));
- if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
- return ret;
- area = find_vm_area(cpu_addr);
- if (WARN_ON(!area || !area->pages))
- return -ENXIO;
- return iommu_dma_mmap(area->pages, size, vma);
- }
- static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
- void *cpu_addr, dma_addr_t dma_addr,
- size_t size, struct dma_attrs *attrs)
- {
- unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
- struct vm_struct *area = find_vm_area(cpu_addr);
- if (WARN_ON(!area || !area->pages))
- return -ENXIO;
- return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
- GFP_KERNEL);
- }
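- /*
-  * For IOMMU-mapped buffers the device address is an IOVA, so cache
-  * maintenance first translates it back to a physical address via
-  * iommu_iova_to_phys() before operating on the kernel linear alias.
-  */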
- static void __iommu_sync_single_for_cpu(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
- {
- phys_addr_t phys;
- if (is_device_dma_coherent(dev))
- return;
- phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
- __dma_unmap_area(phys_to_virt(phys), size, dir);
- }
- static void __iommu_sync_single_for_device(struct device *dev,
- dma_addr_t dev_addr, size_t size,
- enum dma_data_direction dir)
- {
- phys_addr_t phys;
- if (is_device_dma_coherent(dev))
- return;
- phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
- __dma_map_area(phys_to_virt(phys), size, dir);
- }
- static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
- unsigned long offset, size_t size,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- bool coherent = is_device_dma_coherent(dev);
- int prot = dma_direction_to_prot(dir, coherent);
- dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);
- if (!iommu_dma_mapping_error(dev, dev_addr) &&
- !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- __iommu_sync_single_for_device(dev, dev_addr, size, dir);
- return dev_addr;
- }
- static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
- size_t size, enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);
- iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
- }
- static void __iommu_sync_sg_for_cpu(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
- {
- struct scatterlist *sg;
- int i;
- if (is_device_dma_coherent(dev))
- return;
- for_each_sg(sgl, sg, nelems, i)
- __dma_unmap_area(sg_virt(sg), sg->length, dir);
- }
- static void __iommu_sync_sg_for_device(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir)
- {
- struct scatterlist *sg;
- int i;
- if (is_device_dma_coherent(dev))
- return;
- for_each_sg(sgl, sg, nelems, i)
- __dma_map_area(sg_virt(sg), sg->length, dir);
- }
- static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
- int nelems, enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- bool coherent = is_device_dma_coherent(dev);
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- __iommu_sync_sg_for_device(dev, sgl, nelems, dir);
- return iommu_dma_map_sg(dev, sgl, nelems,
- dma_direction_to_prot(dir, coherent));
- }
- static void __iommu_unmap_sg_attrs(struct device *dev,
- struct scatterlist *sgl, int nelems,
- enum dma_data_direction dir,
- struct dma_attrs *attrs)
- {
- if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
- __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);
- iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
- }
- static struct dma_map_ops iommu_dma_ops = {
- .alloc = __iommu_alloc_attrs,
- .free = __iommu_free_attrs,
- .mmap = __iommu_mmap_attrs,
- .get_sgtable = __iommu_get_sgtable,
- .map_page = __iommu_map_page,
- .unmap_page = __iommu_unmap_page,
- .map_sg = __iommu_map_sg_attrs,
- .unmap_sg = __iommu_unmap_sg_attrs,
- .sync_single_for_cpu = __iommu_sync_single_for_cpu,
- .sync_single_for_device = __iommu_sync_single_for_device,
- .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
- .sync_sg_for_device = __iommu_sync_sg_for_device,
- .dma_supported = iommu_dma_supported,
- .mapping_error = iommu_dma_mapping_error,
- };
- /*
- * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
- * everything it needs to - the device is only partially created and the
- * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
- * need this delayed attachment dance. Once IOMMU probe ordering is sorted
- * to move the arch_setup_dma_ops() call later, all the notifier bits below
- * become unnecessary, and will go away.
- */
- struct iommu_dma_notifier_data {
- struct list_head list;
- struct device *dev;
- const struct iommu_ops *ops;
- u64 dma_base;
- u64 size;
- };
- static LIST_HEAD(iommu_dma_masters);
- static DEFINE_MUTEX(iommu_dma_notifier_lock);
- /*
- * Temporarily "borrow" a domain feature flag to tell if we had to resort
- * to creating our own domain here, in case we need to clean it up again.
- */
- #define __IOMMU_DOMAIN_FAKE_DEFAULT (1U << 31)
- static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
- u64 dma_base, u64 size)
- {
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- /*
- * Best case: The device is either part of a group which was
- * already attached to a domain in a previous call, or it's
- * been put in a default DMA domain by the IOMMU core.
- */
- if (!domain) {
- /*
- * Urgh. The IOMMU core isn't going to do default domains
- * for non-PCI devices anyway, until it has some means of
- * abstracting the entirely implementation-specific
- * sideband data/SoC topology/unicorn dust that may or
- * may not differentiate upstream masters.
- * So until then, HORRIBLE HACKS!
- */
- domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
- if (!domain)
- goto out_no_domain;
- domain->ops = ops;
- domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;
- if (iommu_attach_device(domain, dev))
- goto out_put_domain;
- }
- if (iommu_dma_init_domain(domain, dma_base, size))
- goto out_detach;
- dev->archdata.dma_ops = &iommu_dma_ops;
- return true;
- out_detach:
- iommu_detach_device(domain, dev);
- out_put_domain:
- if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
- iommu_domain_free(domain);
- out_no_domain:
- pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
- dev_name(dev));
- return false;
- }
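- /*
-  * The device's IOMMU group may not exist yet when arch_setup_dma_ops()
-  * runs, so record the parameters here and let the bus notifier below
-  * perform the attach once the IOMMU core has seen the device.
-  */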
- static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
- u64 dma_base, u64 size)
- {
- struct iommu_dma_notifier_data *iommudata;
- iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
- if (!iommudata)
- return;
- iommudata->dev = dev;
- iommudata->ops = ops;
- iommudata->dma_base = dma_base;
- iommudata->size = size;
- mutex_lock(&iommu_dma_notifier_lock);
- list_add(&iommudata->list, &iommu_dma_masters);
- mutex_unlock(&iommu_dma_notifier_lock);
- }
- static int __iommu_attach_notifier(struct notifier_block *nb,
- unsigned long action, void *data)
- {
- struct iommu_dma_notifier_data *master, *tmp;
- if (action != BUS_NOTIFY_ADD_DEVICE)
- return 0;
- mutex_lock(&iommu_dma_notifier_lock);
- list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
- if (do_iommu_attach(master->dev, master->ops,
- master->dma_base, master->size)) {
- list_del(&master->list);
- kfree(master);
- }
- }
- mutex_unlock(&iommu_dma_notifier_lock);
- return 0;
- }
- static int register_iommu_dma_ops_notifier(struct bus_type *bus)
- {
- struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
- int ret;
- if (!nb)
- return -ENOMEM;
- /*
- * The device must be attached to a domain before the driver probe
- * routine gets a chance to start allocating DMA buffers. However,
- * the IOMMU driver also needs a chance to configure the iommu_group
- * via its add_device callback first, so we need to make the attach
- * happen between those two points. Since the IOMMU core uses a bus
- * notifier with default priority for add_device, do the same but
- * with a lower priority to ensure the appropriate ordering.
- */
- nb->notifier_call = __iommu_attach_notifier;
- nb->priority = -100;
- ret = bus_register_notifier(bus, nb);
- if (ret) {
- pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
- bus->name);
- kfree(nb);
- }
- return ret;
- }
- static int __init __iommu_dma_init(void)
- {
- int ret;
- ret = iommu_dma_init();
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&platform_bus_type);
- if (!ret)
- ret = register_iommu_dma_ops_notifier(&amba_bustype);
- /* handle devices queued before this arch_initcall */
- if (!ret)
- __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
- return ret;
- }
- arch_initcall(__iommu_dma_init);
- static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- const struct iommu_ops *ops)
- {
- struct iommu_group *group;
- if (!ops)
- return;
- /*
- * TODO: As a concession to the future, we're ready to handle being
- * called both early and late (i.e. after bus_add_device). Once all
- * the platform bus code is reworked to call us late and the notifier
- * junk above goes away, move the body of do_iommu_attach here.
- */
- group = iommu_group_get(dev);
- if (group) {
- do_iommu_attach(dev, ops, dma_base, size);
- iommu_group_put(group);
- } else {
- queue_iommu_attach(dev, ops, dma_base, size);
- }
- }
- void arch_teardown_dma_ops(struct device *dev)
- {
- struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
- if (domain) {
- iommu_detach_device(domain, dev);
- if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
- iommu_domain_free(domain);
- }
- dev->archdata.dma_ops = NULL;
- }
- #else
- static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- struct iommu_ops *iommu)
- { }
- #endif /* CONFIG_IOMMU_DMA */
- void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
- struct iommu_ops *iommu, bool coherent)
- {
- if (!dev->archdata.dma_ops)
- dev->archdata.dma_ops = &swiotlb_dma_ops;
- dev->archdata.dma_coherent = coherent;
- __iommu_setup_dma_ops(dev, dma_base, size, iommu);
- }
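- /*
-  * Illustrative usage (not part of this file): once arch_setup_dma_ops()
-  * has installed swiotlb_dma_ops or iommu_dma_ops for a device, drivers
-  * reach them through the generic DMA API, e.g.:
-  *
-  * void *buf = dma_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
-  * dma_addr_t addr = dma_map_single(dev, ptr, len, DMA_TO_DEVICE);
-  * ...
-  * dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
-  * dma_free_coherent(dev, SZ_4K, buf, dma_handle);
-  */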