img-hash.c

/*
 * Copyright (c) 2014 Imagination Technologies
 * Authors: Will Thomas, James Hartley
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * Interface structure taken from omap-sham driver
 */

#include <linux/clk.h>
#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/internal/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>

#define CR_RESET			0
#define CR_RESET_SET			1
#define CR_RESET_UNSET			0

#define CR_MESSAGE_LENGTH_H		0x4
#define CR_MESSAGE_LENGTH_L		0x8

#define CR_CONTROL			0xc
#define CR_CONTROL_BYTE_ORDER_3210	0
#define CR_CONTROL_BYTE_ORDER_0123	1
#define CR_CONTROL_BYTE_ORDER_2310	2
#define CR_CONTROL_BYTE_ORDER_1032	3
#define CR_CONTROL_BYTE_ORDER_SHIFT	8
#define CR_CONTROL_ALGO_MD5		0
#define CR_CONTROL_ALGO_SHA1		1
#define CR_CONTROL_ALGO_SHA224		2
#define CR_CONTROL_ALGO_SHA256		3

#define CR_INTSTAT			0x10
#define CR_INTENAB			0x14
#define CR_INTCLEAR			0x18
#define CR_INT_RESULTS_AVAILABLE	BIT(0)
#define CR_INT_NEW_RESULTS_SET		BIT(1)
#define CR_INT_RESULT_READ_ERR		BIT(2)
#define CR_INT_MESSAGE_WRITE_ERROR	BIT(3)
#define CR_INT_STATUS			BIT(8)

#define CR_RESULT_QUEUE			0x1c
#define CR_RSD0				0x40
#define CR_CORE_REV			0x50
#define CR_CORE_DES1			0x60
#define CR_CORE_DES2			0x70

#define DRIVER_FLAGS_BUSY		BIT(0)
#define DRIVER_FLAGS_FINAL		BIT(1)
#define DRIVER_FLAGS_DMA_ACTIVE		BIT(2)
#define DRIVER_FLAGS_OUTPUT_READY	BIT(3)
#define DRIVER_FLAGS_INIT		BIT(4)
#define DRIVER_FLAGS_CPU		BIT(5)
#define DRIVER_FLAGS_DMA_READY		BIT(6)
#define DRIVER_FLAGS_ERROR		BIT(7)
#define DRIVER_FLAGS_SG			BIT(8)
#define DRIVER_FLAGS_SHA1		BIT(18)
#define DRIVER_FLAGS_SHA224		BIT(19)
#define DRIVER_FLAGS_SHA256		BIT(20)
#define DRIVER_FLAGS_MD5		BIT(21)

#define IMG_HASH_QUEUE_LENGTH		20
#define IMG_HASH_DMA_THRESHOLD		64

#ifdef __LITTLE_ENDIAN
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_3210
#else
#define IMG_HASH_BYTE_ORDER		CR_CONTROL_BYTE_ORDER_0123
#endif

struct img_hash_dev;

struct img_hash_request_ctx {
	struct img_hash_dev	*hdev;
	u8 digest[SHA256_DIGEST_SIZE] __aligned(sizeof(u32));
	unsigned long		flags;
	size_t			digsize;

	dma_addr_t		dma_addr;
	size_t			dma_ct;

	/* sg root */
	struct scatterlist	*sgfirst;
	/* walk state */
	struct scatterlist	*sg;
	size_t			nents;
	size_t			offset;
	unsigned int		total;
	size_t			sent;

	unsigned long		op;

	size_t			bufcnt;
	u8 buffer[0] __aligned(sizeof(u32));
	struct ahash_request	fallback_req;
};

struct img_hash_ctx {
	struct img_hash_dev	*hdev;
	unsigned long		flags;
	struct crypto_ahash	*fallback;
};

struct img_hash_dev {
	struct list_head	list;
	struct device		*dev;
	struct clk		*hash_clk;
	struct clk		*sys_clk;
	void __iomem		*io_base;

	phys_addr_t		bus_addr;
	void __iomem		*cpu_addr;

	spinlock_t		lock;
	int			err;
	struct tasklet_struct	done_task;
	struct tasklet_struct	dma_task;

	unsigned long		flags;
	struct crypto_queue	queue;
	struct ahash_request	*req;

	struct dma_chan		*dma_lch;
};

struct img_hash_drv {
	struct list_head dev_list;
	spinlock_t lock;
};

static struct img_hash_drv img_hash = {
	.dev_list = LIST_HEAD_INIT(img_hash.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(img_hash.lock),
};

static inline u32 img_hash_read(struct img_hash_dev *hdev, u32 offset)
{
	return readl_relaxed(hdev->io_base + offset);
}

static inline void img_hash_write(struct img_hash_dev *hdev,
				  u32 offset, u32 value)
{
	writel_relaxed(value, hdev->io_base + offset);
}

static inline u32 img_hash_read_result_queue(struct img_hash_dev *hdev)
{
	return be32_to_cpu(img_hash_read(hdev, CR_RESULT_QUEUE));
}

static void img_hash_start(struct img_hash_dev *hdev, bool dma)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u32 cr = IMG_HASH_BYTE_ORDER << CR_CONTROL_BYTE_ORDER_SHIFT;

	if (ctx->flags & DRIVER_FLAGS_MD5)
		cr |= CR_CONTROL_ALGO_MD5;
	else if (ctx->flags & DRIVER_FLAGS_SHA1)
		cr |= CR_CONTROL_ALGO_SHA1;
	else if (ctx->flags & DRIVER_FLAGS_SHA224)
		cr |= CR_CONTROL_ALGO_SHA224;
	else if (ctx->flags & DRIVER_FLAGS_SHA256)
		cr |= CR_CONTROL_ALGO_SHA256;

	dev_dbg(hdev->dev, "Starting hash process\n");
	img_hash_write(hdev, CR_CONTROL, cr);

	/*
	 * The hardware block requires two cycles between writing the control
	 * register and writing the first word of data in non-DMA mode. To
	 * ensure the first data write is not grouped in a burst with the
	 * control register write, a read is issued to 'flush' the bus.
	 */
	if (!dma)
		img_hash_read(hdev, CR_CONTROL);
}

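/*
 * Feed a buffer to the hash write port using 32-bit CPU writes. The last word
 * is written in full even if @length is not a multiple of four; the block is
 * programmed with the exact message length in bits, so any trailing bytes
 * beyond it are ignored by the hardware.
 */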
static int img_hash_xmit_cpu(struct img_hash_dev *hdev, const u8 *buf,
			     size_t length, int final)
{
	u32 count, len32;
	const u32 *buffer = (const u32 *)buf;

	dev_dbg(hdev->dev, "xmit_cpu: length: %zu bytes\n", length);

	if (final)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	len32 = DIV_ROUND_UP(length, sizeof(u32));

	for (count = 0; count < len32; count++)
		writel_relaxed(buffer[count], hdev->cpu_addr);

	return -EINPROGRESS;
}

static void img_hash_dma_callback(void *data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->bufcnt) {
		img_hash_xmit_cpu(hdev, ctx->buffer, ctx->bufcnt, 0);
		ctx->bufcnt = 0;
	}
	if (ctx->sg)
		tasklet_schedule(&hdev->dma_task);
}

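/*
 * Map a single-entry scatterlist and submit it to the slave DMA channel that
 * feeds the hash write port; img_hash_dma_callback() runs on completion.
 */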
static int img_hash_xmit_dma(struct img_hash_dev *hdev, struct scatterlist *sg)
{
	struct dma_async_tx_descriptor *desc;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->dma_ct = dma_map_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
	if (ctx->dma_ct == 0) {
		dev_err(hdev->dev, "Invalid DMA sg\n");
		hdev->err = -EINVAL;
		return -EINVAL;
	}

	desc = dmaengine_prep_slave_sg(hdev->dma_lch,
				       sg,
				       ctx->dma_ct,
				       DMA_MEM_TO_DEV,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		dev_err(hdev->dev, "Null DMA descriptor\n");
		hdev->err = -EINVAL;
		dma_unmap_sg(hdev->dev, sg, 1, DMA_MEM_TO_DEV);
		return -EINVAL;
	}
	desc->callback = img_hash_dma_callback;
	desc->callback_param = hdev;
	dmaengine_submit(desc);
	dma_async_issue_pending(hdev->dma_lch);

	return 0;
}

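/*
 * PIO path for requests below IMG_HASH_DMA_THRESHOLD: copy the whole source
 * scatterlist into the request bounce buffer and push it to the hardware in
 * a single final transfer.
 */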
static int img_hash_write_via_cpu(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	ctx->bufcnt = sg_copy_to_buffer(hdev->req->src, sg_nents(ctx->sg),
					ctx->buffer, hdev->req->nbytes);

	ctx->total = hdev->req->nbytes;
	ctx->bufcnt = 0;

	hdev->flags |= (DRIVER_FLAGS_CPU | DRIVER_FLAGS_FINAL);

	img_hash_start(hdev, false);

	return img_hash_xmit_cpu(hdev, ctx->buffer, ctx->total, 1);
}

static int img_hash_finish(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return -EINVAL;

	memcpy(req->result, ctx->digest, ctx->digsize);

	return 0;
}

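/*
 * Read the digest back from the hardware result queue. The first word popped
 * belongs at the end of the digest, so the buffer is filled back to front.
 */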
static void img_hash_copy_hash(struct ahash_request *req)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	u32 *hash = (u32 *)ctx->digest;
	int i;

	for (i = (ctx->digsize / sizeof(u32)) - 1; i >= 0; i--)
		hash[i] = img_hash_read_result_queue(ctx->hdev);
}

static void img_hash_finish_req(struct ahash_request *req, int err)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = ctx->hdev;

	if (!err) {
		img_hash_copy_hash(req);
		if (DRIVER_FLAGS_FINAL & hdev->flags)
			err = img_hash_finish(req);
	} else {
		dev_warn(hdev->dev, "Hash failed with error %d\n", err);
		ctx->flags |= DRIVER_FLAGS_ERROR;
	}

	hdev->flags &= ~(DRIVER_FLAGS_DMA_READY | DRIVER_FLAGS_OUTPUT_READY |
		DRIVER_FLAGS_CPU | DRIVER_FLAGS_BUSY | DRIVER_FLAGS_FINAL);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

static int img_hash_write_via_dma(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	img_hash_start(hdev, true);

	dev_dbg(hdev->dev, "xmit dma size: %d\n", ctx->total);

	if (!ctx->total)
		hdev->flags |= DRIVER_FLAGS_FINAL;

	hdev->flags |= DRIVER_FLAGS_DMA_ACTIVE | DRIVER_FLAGS_FINAL;

	tasklet_schedule(&hdev->dma_task);

	return -EINPROGRESS;
}

static int img_hash_dma_init(struct img_hash_dev *hdev)
{
	struct dma_slave_config dma_conf;
	int err = -EINVAL;

	hdev->dma_lch = dma_request_slave_channel(hdev->dev, "tx");
	if (!hdev->dma_lch) {
		dev_err(hdev->dev, "Couldn't acquire a slave DMA channel.\n");
		return -EBUSY;
	}
	dma_conf.direction = DMA_MEM_TO_DEV;
	dma_conf.dst_addr = hdev->bus_addr;
	dma_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	dma_conf.dst_maxburst = 16;
	dma_conf.device_fc = false;

	err = dmaengine_slave_config(hdev->dma_lch, &dma_conf);
	if (err) {
		dev_err(hdev->dev, "Couldn't configure DMA slave.\n");
		dma_release_channel(hdev->dma_lch);
		return err;
	}

	return 0;
}

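/*
 * Tasklet that walks the request scatterlist: whole 32-bit words are sent by
 * DMA, and any sub-word remainder is gathered into the bounce buffer so the
 * DMA completion callback can write it out with CPU accesses.
 */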
static void img_hash_dma_task(unsigned long d)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)d;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);
	u8 *addr;
	size_t nbytes, bleft, wsend, len, tbc;
	struct scatterlist tsg;

	if (!ctx->sg)
		return;

	addr = sg_virt(ctx->sg);
	nbytes = ctx->sg->length - ctx->offset;

	/*
	 * The hash accelerator does not support a data valid mask. This means
	 * that if each dma (i.e. per page) is not a multiple of 4 bytes, the
	 * padding bytes in the last word written by that dma would erroneously
	 * be included in the hash. To avoid this we round down the transfer,
	 * and add the excess to the start of the next dma. It does not matter
	 * that the final dma may not be a multiple of 4 bytes as the hashing
	 * block is programmed to accept the correct number of bytes.
	 */

	bleft = nbytes % 4;
	wsend = (nbytes / 4);

	if (wsend) {
		sg_init_one(&tsg, addr + ctx->offset, wsend * 4);
		if (img_hash_xmit_dma(hdev, &tsg)) {
			dev_err(hdev->dev, "DMA failed, falling back to CPU\n");
			ctx->flags |= DRIVER_FLAGS_CPU;
			hdev->err = 0;
			img_hash_xmit_cpu(hdev, addr + ctx->offset,
					  wsend * 4, 0);
			ctx->sent += wsend * 4;
			wsend = 0;
		} else {
			ctx->sent += wsend * 4;
		}
	}

	if (bleft) {
		ctx->bufcnt = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer, bleft, ctx->sent);
		tbc = 0;
		ctx->sg = sg_next(ctx->sg);
		while (ctx->sg && (ctx->bufcnt < 4)) {
			len = ctx->sg->length;
			if (likely(len > (4 - ctx->bufcnt)))
				len = 4 - ctx->bufcnt;
			tbc = sg_pcopy_to_buffer(ctx->sgfirst, ctx->nents,
						 ctx->buffer + ctx->bufcnt, len,
						 ctx->sent + ctx->bufcnt);
			ctx->bufcnt += tbc;
			if (tbc >= ctx->sg->length) {
				ctx->sg = sg_next(ctx->sg);
				tbc = 0;
			}
		}

		ctx->sent += ctx->bufcnt;
		ctx->offset = tbc;

		if (!wsend)
			img_hash_dma_callback(hdev);
	} else {
		ctx->offset = 0;
		ctx->sg = sg_next(ctx->sg);
	}
}

static int img_hash_write_via_dma_stop(struct img_hash_dev *hdev)
{
	struct img_hash_request_ctx *ctx = ahash_request_ctx(hdev->req);

	if (ctx->flags & DRIVER_FLAGS_SG)
		dma_unmap_sg(hdev->dev, ctx->sg, ctx->dma_ct, DMA_TO_DEVICE);

	return 0;
}

static int img_hash_process_data(struct img_hash_dev *hdev)
{
	struct ahash_request *req = hdev->req;
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	int err = 0;

	ctx->bufcnt = 0;

	if (req->nbytes >= IMG_HASH_DMA_THRESHOLD) {
		dev_dbg(hdev->dev, "process data request(%d bytes) using DMA\n",
			req->nbytes);
		err = img_hash_write_via_dma(hdev);
	} else {
		dev_dbg(hdev->dev, "process data request(%d bytes) using CPU\n",
			req->nbytes);
		err = img_hash_write_via_cpu(hdev);
	}

	return err;
}

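/*
 * Reset the block, enable the 'new results' interrupt and program the total
 * message length in bits before any data is written.
 */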
static int img_hash_hw_init(struct img_hash_dev *hdev)
{
	unsigned long long nbits;
	u32 u, l;

	img_hash_write(hdev, CR_RESET, CR_RESET_SET);
	img_hash_write(hdev, CR_RESET, CR_RESET_UNSET);
	img_hash_write(hdev, CR_INTENAB, CR_INT_NEW_RESULTS_SET);

	nbits = (u64)hdev->req->nbytes << 3;
	u = nbits >> 32;
	l = nbits;
	img_hash_write(hdev, CR_MESSAGE_LENGTH_H, u);
	img_hash_write(hdev, CR_MESSAGE_LENGTH_L, l);

	if (!(DRIVER_FLAGS_INIT & hdev->flags)) {
		hdev->flags |= DRIVER_FLAGS_INIT;
		hdev->err = 0;
	}
	dev_dbg(hdev->dev, "hw initialized, nbits: %llx\n", nbits);
	return 0;
}

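/*
 * init/update/final/finup are passed straight to the software fallback;
 * only a complete digest() of a request is offloaded to the hardware.
 */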
static int img_hash_init(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;

	return crypto_ahash_init(&rctx->fallback_req);
}

static int img_hash_handle_queue(struct img_hash_dev *hdev,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct img_hash_request_ctx *ctx;
	unsigned long flags;
	int err = 0, res = 0;

	spin_lock_irqsave(&hdev->lock, flags);

	if (req)
		res = ahash_enqueue_request(&hdev->queue, req);

	if (DRIVER_FLAGS_BUSY & hdev->flags) {
		spin_unlock_irqrestore(&hdev->lock, flags);
		return res;
	}

	backlog = crypto_get_backlog(&hdev->queue);
	async_req = crypto_dequeue_request(&hdev->queue);
	if (async_req)
		hdev->flags |= DRIVER_FLAGS_BUSY;

	spin_unlock_irqrestore(&hdev->lock, flags);

	if (!async_req)
		return res;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	hdev->req = req;

	ctx = ahash_request_ctx(req);

	dev_info(hdev->dev, "processing req, op: %lu, bytes: %d\n",
		 ctx->op, req->nbytes);

	err = img_hash_hw_init(hdev);

	if (!err)
		err = img_hash_process_data(hdev);

	if (err != -EINPROGRESS) {
		/* done_task will not finish so do it here */
		img_hash_finish_req(req, err);
	}
	return res;
}

static int img_hash_update(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;

	return crypto_ahash_update(&rctx->fallback_req);
}

static int img_hash_final(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_final(&rctx->fallback_req);
}

static int img_hash_finup(struct ahash_request *req)
{
	struct img_hash_request_ctx *rctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *ctx = crypto_ahash_ctx(tfm);

	ahash_request_set_tfm(&rctx->fallback_req, ctx->fallback);
	rctx->fallback_req.base.flags = req->base.flags
		& CRYPTO_TFM_REQ_MAY_SLEEP;
	rctx->fallback_req.nbytes = req->nbytes;
	rctx->fallback_req.src = req->src;
	rctx->fallback_req.result = req->result;

	return crypto_ahash_finup(&rctx->fallback_req);
}

static int img_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct img_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	struct img_hash_request_ctx *ctx = ahash_request_ctx(req);
	struct img_hash_dev *hdev = NULL;
	struct img_hash_dev *tmp;
	int err;

	spin_lock(&img_hash.lock);
	if (!tctx->hdev) {
		list_for_each_entry(tmp, &img_hash.dev_list, list) {
			hdev = tmp;
			break;
		}
		tctx->hdev = hdev;
	} else {
		hdev = tctx->hdev;
	}
	spin_unlock(&img_hash.lock);

	ctx->hdev = hdev;
	ctx->flags = 0;
	ctx->digsize = crypto_ahash_digestsize(tfm);

	switch (ctx->digsize) {
	case SHA1_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA1;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA256;
		break;
	case SHA224_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_SHA224;
		break;
	case MD5_DIGEST_SIZE:
		ctx->flags |= DRIVER_FLAGS_MD5;
		break;
	default:
		return -EINVAL;
	}

	ctx->bufcnt = 0;
	ctx->offset = 0;
	ctx->sent = 0;
	ctx->total = req->nbytes;
	ctx->sg = req->src;
	ctx->sgfirst = req->src;
	ctx->nents = sg_nents(ctx->sg);

	err = img_hash_handle_queue(tctx->hdev, req);

	return err;
}

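/*
 * Allocate the software fallback transform and size the request context so
 * it can carry a bounce buffer of up to IMG_HASH_DMA_THRESHOLD bytes.
 */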
static int img_hash_cra_init(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *ctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);
	int err = -ENOMEM;

	ctx->fallback = crypto_alloc_ahash(alg_name, 0,
					   CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(ctx->fallback)) {
		pr_err("img_hash: Could not load fallback driver.\n");
		err = PTR_ERR(ctx->fallback);
		goto err;
	}
	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct img_hash_request_ctx) +
				 IMG_HASH_DMA_THRESHOLD);

	return 0;

err:
	return err;
}

static void img_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct img_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_ahash(tctx->fallback);
}

static irqreturn_t img_irq_handler(int irq, void *dev_id)
{
	struct img_hash_dev *hdev = dev_id;
	u32 reg;

	reg = img_hash_read(hdev, CR_INTSTAT);
	img_hash_write(hdev, CR_INTCLEAR, reg);

	if (reg & CR_INT_NEW_RESULTS_SET) {
		dev_dbg(hdev->dev, "IRQ CR_INT_NEW_RESULTS_SET\n");
		if (DRIVER_FLAGS_BUSY & hdev->flags) {
			hdev->flags |= DRIVER_FLAGS_OUTPUT_READY;
			if (!(DRIVER_FLAGS_CPU & hdev->flags))
				hdev->flags |= DRIVER_FLAGS_DMA_READY;
			tasklet_schedule(&hdev->done_task);
		} else {
			dev_warn(hdev->dev,
				 "HASH interrupt when no active requests.\n");
		}
	} else if (reg & CR_INT_RESULTS_AVAILABLE) {
		dev_warn(hdev->dev,
			 "IRQ triggered before the hash had completed\n");
	} else if (reg & CR_INT_RESULT_READ_ERR) {
		dev_warn(hdev->dev,
			 "Attempt to read from an empty result queue\n");
	} else if (reg & CR_INT_MESSAGE_WRITE_ERROR) {
		dev_warn(hdev->dev,
			 "Data written before the hardware was configured\n");
	}
	return IRQ_HANDLED;
}

static struct ahash_alg img_algs[] = {
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = MD5_DIGEST_SIZE,
			.base = {
				.cra_name = "md5",
				.cra_driver_name = "img-md5",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = MD5_HMAC_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA1_DIGEST_SIZE,
			.base = {
				.cra_name = "sha1",
				.cra_driver_name = "img-sha1",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA1_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA224_DIGEST_SIZE,
			.base = {
				.cra_name = "sha224",
				.cra_driver_name = "img-sha224",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA224_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	},
	{
		.init = img_hash_init,
		.update = img_hash_update,
		.final = img_hash_final,
		.finup = img_hash_finup,
		.digest = img_hash_digest,
		.halg = {
			.digestsize = SHA256_DIGEST_SIZE,
			.base = {
				.cra_name = "sha256",
				.cra_driver_name = "img-sha256",
				.cra_priority = 300,
				.cra_flags =
				CRYPTO_ALG_ASYNC |
				CRYPTO_ALG_NEED_FALLBACK,
				.cra_blocksize = SHA256_BLOCK_SIZE,
				.cra_ctxsize = sizeof(struct img_hash_ctx),
				.cra_init = img_hash_cra_init,
				.cra_exit = img_hash_cra_exit,
				.cra_module = THIS_MODULE,
			}
		}
	}
};

static int img_register_algs(struct img_hash_dev *hdev)
{
	int i, err;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++) {
		err = crypto_register_ahash(&img_algs[i]);
		if (err)
			goto err_reg;
	}
	return 0;

err_reg:
	for (; i--; )
		crypto_unregister_ahash(&img_algs[i]);

	return err;
}

static int img_unregister_algs(struct img_hash_dev *hdev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(img_algs); i++)
		crypto_unregister_ahash(&img_algs[i]);
	return 0;
}

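/*
 * Tasklet scheduled from the interrupt handler: tears down a finished CPU or
 * DMA transfer and completes the request, or pulls the next request off the
 * queue when the hardware is idle.
 */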
static void img_hash_done_task(unsigned long data)
{
	struct img_hash_dev *hdev = (struct img_hash_dev *)data;
	int err = 0;

	if (hdev->err == -EINVAL) {
		err = hdev->err;
		goto finish;
	}

	if (!(DRIVER_FLAGS_BUSY & hdev->flags)) {
		img_hash_handle_queue(hdev, NULL);
		return;
	}

	if (DRIVER_FLAGS_CPU & hdev->flags) {
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_OUTPUT_READY;
			goto finish;
		}
	} else if (DRIVER_FLAGS_DMA_READY & hdev->flags) {
		if (DRIVER_FLAGS_DMA_ACTIVE & hdev->flags) {
			hdev->flags &= ~DRIVER_FLAGS_DMA_ACTIVE;
			img_hash_write_via_dma_stop(hdev);
			if (hdev->err) {
				err = hdev->err;
				goto finish;
			}
		}
		if (DRIVER_FLAGS_OUTPUT_READY & hdev->flags) {
			hdev->flags &= ~(DRIVER_FLAGS_DMA_READY |
					DRIVER_FLAGS_OUTPUT_READY);
			goto finish;
		}
	}
	return;

finish:
	img_hash_finish_req(hdev->req, err);
}

static const struct of_device_id img_hash_match[] = {
	{ .compatible = "img,hash-accelerator" },
	{}
};
MODULE_DEVICE_TABLE(of, img_hash_match);

static int img_hash_probe(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;
	struct device *dev = &pdev->dev;
	struct resource *hash_res;
	int irq;
	int err;

	hdev = devm_kzalloc(dev, sizeof(*hdev), GFP_KERNEL);
	if (hdev == NULL)
		return -ENOMEM;

	spin_lock_init(&hdev->lock);

	hdev->dev = dev;

	platform_set_drvdata(pdev, hdev);

	INIT_LIST_HEAD(&hdev->list);

	tasklet_init(&hdev->done_task, img_hash_done_task, (unsigned long)hdev);
	tasklet_init(&hdev->dma_task, img_hash_dma_task, (unsigned long)hdev);

	crypto_init_queue(&hdev->queue, IMG_HASH_QUEUE_LENGTH);

	/* Register bank */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	hdev->io_base = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->io_base)) {
		err = PTR_ERR(hdev->io_base);
		dev_err(dev, "can't ioremap, returned %d\n", err);
		goto res_err;
	}

	/* Write port (DMA or CPU) */
	hash_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	hdev->cpu_addr = devm_ioremap_resource(dev, hash_res);
	if (IS_ERR(hdev->cpu_addr)) {
		dev_err(dev, "can't ioremap write port\n");
		err = PTR_ERR(hdev->cpu_addr);
		goto res_err;
	}
	hdev->bus_addr = hash_res->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		dev_err(dev, "no IRQ resource info\n");
		err = irq;
		goto res_err;
	}

	err = devm_request_irq(dev, irq, img_irq_handler, 0,
			       dev_name(dev), hdev);
	if (err) {
		dev_err(dev, "unable to request irq\n");
		goto res_err;
	}
	dev_dbg(dev, "using IRQ channel %d\n", irq);

	hdev->hash_clk = devm_clk_get(&pdev->dev, "hash");
	if (IS_ERR(hdev->hash_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->hash_clk);
		goto res_err;
	}

	hdev->sys_clk = devm_clk_get(&pdev->dev, "sys");
	if (IS_ERR(hdev->sys_clk)) {
		dev_err(dev, "clock initialization failed.\n");
		err = PTR_ERR(hdev->sys_clk);
		goto res_err;
	}

	err = clk_prepare_enable(hdev->hash_clk);
	if (err)
		goto res_err;

	err = clk_prepare_enable(hdev->sys_clk);
	if (err)
		goto clk_err;

	err = img_hash_dma_init(hdev);
	if (err)
		goto dma_err;

	dev_dbg(dev, "using %s for DMA transfers\n",
		dma_chan_name(hdev->dma_lch));

	spin_lock(&img_hash.lock);
	list_add_tail(&hdev->list, &img_hash.dev_list);
	spin_unlock(&img_hash.lock);

	err = img_register_algs(hdev);
	if (err)
		goto err_algs;
	dev_dbg(dev, "Img MD5/SHA1/SHA224/SHA256 Hardware accelerator initialized\n");

	return 0;

err_algs:
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);
	dma_release_channel(hdev->dma_lch);
dma_err:
	clk_disable_unprepare(hdev->sys_clk);
clk_err:
	clk_disable_unprepare(hdev->hash_clk);
res_err:
	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	return err;
}

static int img_hash_remove(struct platform_device *pdev)
{
	struct img_hash_dev *hdev;

	hdev = platform_get_drvdata(pdev);
	spin_lock(&img_hash.lock);
	list_del(&hdev->list);
	spin_unlock(&img_hash.lock);

	img_unregister_algs(hdev);

	tasklet_kill(&hdev->done_task);
	tasklet_kill(&hdev->dma_task);

	dma_release_channel(hdev->dma_lch);

	clk_disable_unprepare(hdev->hash_clk);
	clk_disable_unprepare(hdev->sys_clk);

	return 0;
}

static struct platform_driver img_hash_driver = {
	.probe		= img_hash_probe,
	.remove		= img_hash_remove,
	.driver		= {
		.name	= "img-hash-accelerator",
		.of_match_table	= of_match_ptr(img_hash_match),
	}
};
module_platform_driver(img_hash_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Imgtec SHA1/224/256 & MD5 hw accelerator driver");
MODULE_AUTHOR("Will Thomas.");
MODULE_AUTHOR("James Hartley <james.hartley@imgtec.com>");