shmobile-iommu.c

/*
 * IOMMU for IPMMU/IPMMUI
 * Copyright (C) 2012 Hideki EIRAKU
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 */

#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <asm/dma-iommu.h>
#include "shmobile-ipmmu.h"

#define L1_SIZE CONFIG_SHMOBILE_IOMMU_L1SIZE
#define L1_LEN (L1_SIZE / 4)
#define L1_ALIGN L1_SIZE
#define L2_SIZE SZ_1K
#define L2_LEN (L2_SIZE / 4)
#define L2_ALIGN L2_SIZE
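
/*
 * Two-level page table, apparently modelled on the ARM short-descriptor
 * format: the L1 table holds one 32-bit entry per 1 MiB section (L1_LEN
 * entries in total), and each 1 KiB L2 table holds 256 entries, one per
 * 4 KiB page of its section.
 */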

struct shmobile_iommu_domain_pgtable {
        uint32_t *pgtable;
        dma_addr_t handle;
};

struct shmobile_iommu_archdata {
        struct list_head attached_list;
        struct dma_iommu_mapping *iommu_mapping;
        spinlock_t attach_lock;
        struct shmobile_iommu_domain *attached;
        int num_attached_devices;
        struct shmobile_ipmmu *ipmmu;
};

struct shmobile_iommu_domain {
        struct shmobile_iommu_domain_pgtable l1, l2[L1_LEN];
        spinlock_t map_lock;
        spinlock_t attached_list_lock;
        struct list_head attached_list;
        struct iommu_domain domain;
};

static struct shmobile_iommu_archdata *ipmmu_archdata;
static struct kmem_cache *l1cache, *l2cache;

static struct shmobile_iommu_domain *to_sh_domain(struct iommu_domain *dom)
{
        return container_of(dom, struct shmobile_iommu_domain, domain);
}
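
/*
 * Page-table helpers. Tables live in kmem_cache objects and are handed
 * to the IPMMU through the streaming DMA API, so every CPU-side update
 * has to be pushed out with dma_sync_single_for_device() before the
 * hardware may walk the table; pgtable_write() does that for the
 * entries it touches.
 */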
static int pgtable_alloc(struct shmobile_iommu_domain_pgtable *pgtable,
                         struct kmem_cache *cache, size_t size)
{
        pgtable->pgtable = kmem_cache_zalloc(cache, GFP_ATOMIC);
        if (!pgtable->pgtable)
                return -ENOMEM;
        pgtable->handle = dma_map_single(NULL, pgtable->pgtable, size,
                                         DMA_TO_DEVICE);
        return 0;
}

static void pgtable_free(struct shmobile_iommu_domain_pgtable *pgtable,
                         struct kmem_cache *cache, size_t size)
{
        dma_unmap_single(NULL, pgtable->handle, size, DMA_TO_DEVICE);
        kmem_cache_free(cache, pgtable->pgtable);
}

static uint32_t pgtable_read(struct shmobile_iommu_domain_pgtable *pgtable,
                             unsigned int index)
{
        return pgtable->pgtable[index];
}

static void pgtable_write(struct shmobile_iommu_domain_pgtable *pgtable,
                          unsigned int index, unsigned int count, uint32_t val)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                pgtable->pgtable[index + i] = val;
        dma_sync_single_for_device(NULL, pgtable->handle + index * sizeof(val),
                                   sizeof(val) * count, DMA_TO_DEVICE);
}
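
/* Allocate a domain: one zeroed L1 table; L2 tables are created lazily. */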
static struct iommu_domain *shmobile_iommu_domain_alloc(unsigned type)
{
        struct shmobile_iommu_domain *sh_domain;
        int i, ret;

        if (type != IOMMU_DOMAIN_UNMANAGED)
                return NULL;

        sh_domain = kzalloc(sizeof(*sh_domain), GFP_KERNEL);
        if (!sh_domain)
                return NULL;
        ret = pgtable_alloc(&sh_domain->l1, l1cache, L1_SIZE);
        if (ret < 0) {
                kfree(sh_domain);
                return NULL;
        }
        for (i = 0; i < L1_LEN; i++)
                sh_domain->l2[i].pgtable = NULL;
        spin_lock_init(&sh_domain->map_lock);
        spin_lock_init(&sh_domain->attached_list_lock);
        INIT_LIST_HEAD(&sh_domain->attached_list);
        return &sh_domain->domain;
}

static void shmobile_iommu_domain_free(struct iommu_domain *domain)
{
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        int i;

        for (i = 0; i < L1_LEN; i++) {
                if (sh_domain->l2[i].pgtable)
                        pgtable_free(&sh_domain->l2[i], l2cache, L2_SIZE);
        }
        pgtable_free(&sh_domain->l1, l1cache, L1_SIZE);
        kfree(sh_domain);
}
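
/*
 * An IPMMU instance has a single translation context, so every device
 * behind it must share one domain: the first attach points the hardware
 * at the domain's L1 table, further attaches to the same domain only
 * bump the device count, and attaching while a different domain is
 * already set fails with -EBUSY.
 */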
static int shmobile_iommu_attach_device(struct iommu_domain *domain,
                                        struct device *dev)
{
        struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        int ret = -EBUSY;

        if (!archdata)
                return -ENODEV;
        spin_lock(&sh_domain->attached_list_lock);
        spin_lock(&archdata->attach_lock);
        if (archdata->attached != sh_domain) {
                if (archdata->attached)
                        goto err;
                ipmmu_tlb_set(archdata->ipmmu, sh_domain->l1.handle, L1_SIZE,
                              0);
                ipmmu_tlb_flush(archdata->ipmmu);
                archdata->attached = sh_domain;
                archdata->num_attached_devices = 0;
                list_add(&archdata->attached_list, &sh_domain->attached_list);
        }
        archdata->num_attached_devices++;
        ret = 0;
err:
        spin_unlock(&archdata->attach_lock);
        spin_unlock(&sh_domain->attached_list_lock);
        return ret;
}

static void shmobile_iommu_detach_device(struct iommu_domain *domain,
                                         struct device *dev)
{
        struct shmobile_iommu_archdata *archdata = dev->archdata.iommu;
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);

        if (!archdata)
                return;
        spin_lock(&sh_domain->attached_list_lock);
        spin_lock(&archdata->attach_lock);
        archdata->num_attached_devices--;
        if (!archdata->num_attached_devices) {
                ipmmu_tlb_set(archdata->ipmmu, 0, 0, 0);
                ipmmu_tlb_flush(archdata->ipmmu);
                archdata->attached = NULL;
                list_del(&archdata->attached_list);
        }
        spin_unlock(&archdata->attach_lock);
        spin_unlock(&sh_domain->attached_list_lock);
}
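
/* Flush the TLB of every IPMMU instance attached to this domain. */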
static void domain_tlb_flush(struct shmobile_iommu_domain *sh_domain)
{
        struct shmobile_iommu_archdata *archdata;

        spin_lock(&sh_domain->attached_list_lock);
        list_for_each_entry(archdata, &sh_domain->attached_list, attached_list)
                ipmmu_tlb_flush(archdata->ipmmu);
        spin_unlock(&sh_domain->attached_list_lock);
}
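
/*
 * L2 table management. l2free() only unhooks the table from its L1
 * entry under map_lock and hands it back to the caller; the caller
 * releases it with l2realfree() after the TLB flush, so the hardware
 * never walks a table that is being freed.
 */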
static int l2alloc(struct shmobile_iommu_domain *sh_domain,
                   unsigned int l1index)
{
        int ret;

        if (!sh_domain->l2[l1index].pgtable) {
                ret = pgtable_alloc(&sh_domain->l2[l1index], l2cache, L2_SIZE);
                if (ret < 0)
                        return ret;
        }
        pgtable_write(&sh_domain->l1, l1index, 1,
                      sh_domain->l2[l1index].handle | 0x1);
        return 0;
}

static void l2realfree(struct shmobile_iommu_domain_pgtable *l2)
{
        if (l2->pgtable)
                pgtable_free(l2, l2cache, L2_SIZE);
}

static void l2free(struct shmobile_iommu_domain *sh_domain,
                   unsigned int l1index,
                   struct shmobile_iommu_domain_pgtable *l2)
{
        pgtable_write(&sh_domain->l1, l1index, 1, 0);
        if (sh_domain->l2[l1index].pgtable) {
                *l2 = sh_domain->l2[l1index];
                sh_domain->l2[l1index].pgtable = NULL;
        }
}
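
/*
 * Map a single page of 4 KiB, 64 KiB or 1 MiB. A 64 KiB mapping writes
 * 16 identical L2 entries (the ARM large-page convention), and a 1 MiB
 * mapping writes a section descriptor straight into the L1 table,
 * discarding any L2 table that covered that section.
 */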
static int shmobile_iommu_map(struct iommu_domain *domain, unsigned long iova,
                              phys_addr_t paddr, size_t size, int prot)
{
        struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        unsigned int l1index, l2index;
        int ret;

        l1index = iova >> 20;
        switch (size) {
        case SZ_4K:
                l2index = (iova >> 12) & 0xff;
                spin_lock(&sh_domain->map_lock);
                ret = l2alloc(sh_domain, l1index);
                if (!ret)
                        pgtable_write(&sh_domain->l2[l1index], l2index, 1,
                                      paddr | 0xff2);
                spin_unlock(&sh_domain->map_lock);
                break;
        case SZ_64K:
                l2index = (iova >> 12) & 0xf0;
                spin_lock(&sh_domain->map_lock);
                ret = l2alloc(sh_domain, l1index);
                if (!ret)
                        pgtable_write(&sh_domain->l2[l1index], l2index, 0x10,
                                      paddr | 0xff1);
                spin_unlock(&sh_domain->map_lock);
                break;
        case SZ_1M:
                spin_lock(&sh_domain->map_lock);
                l2free(sh_domain, l1index, &l2);
                pgtable_write(&sh_domain->l1, l1index, 1, paddr | 0xc02);
                spin_unlock(&sh_domain->map_lock);
                ret = 0;
                break;
        default:
                ret = -EINVAL;
        }
        if (!ret)
                domain_tlb_flush(sh_domain);
        l2realfree(&l2);
        return ret;
}
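
/*
 * Unmap is sized by what the tables actually contain: a section-aligned
 * request of at least 1 MiB drops the whole L2 table, otherwise the L2
 * entry type decides whether 64 KiB or 4 KiB is torn down. The size
 * actually unmapped is returned, 0 if nothing matched.
 */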
static size_t shmobile_iommu_unmap(struct iommu_domain *domain,
                                   unsigned long iova, size_t size)
{
        struct shmobile_iommu_domain_pgtable l2 = { .pgtable = NULL };
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        unsigned int l1index, l2index;
        uint32_t l2entry = 0;
        size_t ret = 0;

        l1index = iova >> 20;
        if (!(iova & 0xfffff) && size >= SZ_1M) {
                spin_lock(&sh_domain->map_lock);
                l2free(sh_domain, l1index, &l2);
                spin_unlock(&sh_domain->map_lock);
                ret = SZ_1M;
                goto done;
        }
        l2index = (iova >> 12) & 0xff;
        spin_lock(&sh_domain->map_lock);
        if (sh_domain->l2[l1index].pgtable)
                l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
        switch (l2entry & 3) {
        case 1:
                if (l2index & 0xf)
                        break;
                pgtable_write(&sh_domain->l2[l1index], l2index, 0x10, 0);
                ret = SZ_64K;
                break;
        case 2:
                pgtable_write(&sh_domain->l2[l1index], l2index, 1, 0);
                ret = SZ_4K;
                break;
        }
        spin_unlock(&sh_domain->map_lock);
done:
        if (ret)
                domain_tlb_flush(sh_domain);
        l2realfree(&l2);
        return ret;
}
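
/*
 * Software table walk: an L2 entry of type 1 is a 64 KiB large page and
 * type 2 a 4 KiB small page; without an L2 table, an L1 entry of type 2
 * is a 1 MiB section. Anything else translates to 0.
 */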
static phys_addr_t shmobile_iommu_iova_to_phys(struct iommu_domain *domain,
                                               dma_addr_t iova)
{
        struct shmobile_iommu_domain *sh_domain = to_sh_domain(domain);
        uint32_t l1entry = 0, l2entry = 0;
        unsigned int l1index, l2index;

        l1index = iova >> 20;
        l2index = (iova >> 12) & 0xff;
        spin_lock(&sh_domain->map_lock);
        if (sh_domain->l2[l1index].pgtable)
                l2entry = pgtable_read(&sh_domain->l2[l1index], l2index);
        else
                l1entry = pgtable_read(&sh_domain->l1, l1index);
        spin_unlock(&sh_domain->map_lock);
        switch (l2entry & 3) {
        case 1:
                return (l2entry & ~0xffff) | (iova & 0xffff);
        case 2:
                return (l2entry & ~0xfff) | (iova & 0xfff);
        default:
                if ((l1entry & 3) == 2)
                        return (l1entry & ~0xfffff) | (iova & 0xfffff);
                return 0;
        }
}
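
/*
 * Only devices whose names appear in the dev_names list carried by
 * struct shmobile_ipmmu are put behind the IOMMU; they all share one
 * ARM DMA mapping covering the whole IOVA range the L1 table describes.
 */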
static int find_dev_name(struct shmobile_ipmmu *ipmmu, const char *dev_name)
{
        unsigned int i, n = ipmmu->num_dev_names;

        for (i = 0; i < n; i++) {
                if (strcmp(ipmmu->dev_names[i], dev_name) == 0)
                        return 1;
        }
        return 0;
}

static int shmobile_iommu_add_device(struct device *dev)
{
        struct shmobile_iommu_archdata *archdata = ipmmu_archdata;
        struct dma_iommu_mapping *mapping;

        if (!find_dev_name(archdata->ipmmu, dev_name(dev)))
                return 0;
        mapping = archdata->iommu_mapping;
        if (!mapping) {
                mapping = arm_iommu_create_mapping(&platform_bus_type, 0,
                                                   L1_LEN << 20);
                if (IS_ERR(mapping))
                        return PTR_ERR(mapping);
                archdata->iommu_mapping = mapping;
        }
        dev->archdata.iommu = archdata;
        if (arm_iommu_attach_device(dev, mapping))
                pr_err("arm_iommu_attach_device failed\n");
        return 0;
}

static const struct iommu_ops shmobile_iommu_ops = {
        .domain_alloc = shmobile_iommu_domain_alloc,
        .domain_free = shmobile_iommu_domain_free,
        .attach_dev = shmobile_iommu_attach_device,
        .detach_dev = shmobile_iommu_detach_device,
        .map = shmobile_iommu_map,
        .unmap = shmobile_iommu_unmap,
        .map_sg = default_iommu_map_sg,
        .iova_to_phys = shmobile_iommu_iova_to_phys,
        .add_device = shmobile_iommu_add_device,
        .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K,
};
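
/*
 * Called from the IPMMU probe code: create the page-table caches (each
 * cache's object size doubles as its alignment, presumably because the
 * hardware requires table-size-aligned bases), then publish the ops on
 * the platform bus.
 */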
int ipmmu_iommu_init(struct shmobile_ipmmu *ipmmu)
{
        static struct shmobile_iommu_archdata *archdata;

        l1cache = kmem_cache_create("shmobile-iommu-pgtable1", L1_SIZE,
                                    L1_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
        if (!l1cache)
                return -ENOMEM;
        l2cache = kmem_cache_create("shmobile-iommu-pgtable2", L2_SIZE,
                                    L2_ALIGN, SLAB_HWCACHE_ALIGN, NULL);
        if (!l2cache) {
                kmem_cache_destroy(l1cache);
                return -ENOMEM;
        }
        archdata = kzalloc(sizeof(*archdata), GFP_KERNEL);
        if (!archdata) {
                kmem_cache_destroy(l1cache);
                kmem_cache_destroy(l2cache);
                return -ENOMEM;
        }
        spin_lock_init(&archdata->attach_lock);
        archdata->ipmmu = ipmmu;
        ipmmu_archdata = archdata;
        bus_set_iommu(&platform_bus_type, &shmobile_iommu_ops);
        return 0;
}