/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/ccp.h>
#include <linux/scatterlist.h>
#include <crypto/scatterwalk.h>
#include <crypto/sha.h>

#include "ccp-dev.h"

enum ccp_memtype {
	CCP_MEMTYPE_SYSTEM = 0,
	CCP_MEMTYPE_KSB,
	CCP_MEMTYPE_LOCAL,
	CCP_MEMTYPE__LAST,
};

struct ccp_dma_info {
	dma_addr_t address;
	unsigned int offset;
	unsigned int length;
	enum dma_data_direction dir;
};

struct ccp_dm_workarea {
	struct device *dev;
	struct dma_pool *dma_pool;
	unsigned int length;

	u8 *address;
	struct ccp_dma_info dma;
};

struct ccp_sg_workarea {
	struct scatterlist *sg;
	int nents;

	struct scatterlist *dma_sg;
	struct device *dma_dev;
	unsigned int dma_count;
	enum dma_data_direction dma_dir;

	unsigned int sg_used;

	u64 bytes_left;
};

struct ccp_data {
	struct ccp_sg_workarea sg_wa;
	struct ccp_dm_workarea dm_wa;
};

struct ccp_mem {
	enum ccp_memtype type;
	union {
		struct ccp_dma_info dma;
		u32 ksb;
	} u;
};

struct ccp_aes_op {
	enum ccp_aes_type type;
	enum ccp_aes_mode mode;
	enum ccp_aes_action action;
};

struct ccp_xts_aes_op {
	enum ccp_aes_action action;
	enum ccp_xts_aes_unit_size unit_size;
};

struct ccp_sha_op {
	enum ccp_sha_type type;
	u64 msg_bits;
};

struct ccp_rsa_op {
	u32 mod_size;
	u32 input_len;
};

struct ccp_passthru_op {
	enum ccp_passthru_bitwise bit_mod;
	enum ccp_passthru_byteswap byte_swap;
};

struct ccp_ecc_op {
	enum ccp_ecc_function function;
};

struct ccp_op {
	struct ccp_cmd_queue *cmd_q;

	u32 jobid;
	u32 ioc;
	u32 soc;
	u32 ksb_key;
	u32 ksb_ctx;
	u32 init;
	u32 eom;

	struct ccp_mem src;
	struct ccp_mem dst;

	union {
		struct ccp_aes_op aes;
		struct ccp_xts_aes_op xts;
		struct ccp_sha_op sha;
		struct ccp_rsa_op rsa;
		struct ccp_passthru_op passthru;
		struct ccp_ecc_op ecc;
	} u;
};

/* SHA initial context values */
static const __be32 ccp_sha1_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA1_H0), cpu_to_be32(SHA1_H1),
	cpu_to_be32(SHA1_H2), cpu_to_be32(SHA1_H3),
	cpu_to_be32(SHA1_H4), 0, 0, 0,
};

static const __be32 ccp_sha224_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA224_H0), cpu_to_be32(SHA224_H1),
	cpu_to_be32(SHA224_H2), cpu_to_be32(SHA224_H3),
	cpu_to_be32(SHA224_H4), cpu_to_be32(SHA224_H5),
	cpu_to_be32(SHA224_H6), cpu_to_be32(SHA224_H7),
};

static const __be32 ccp_sha256_init[CCP_SHA_CTXSIZE / sizeof(__be32)] = {
	cpu_to_be32(SHA256_H0), cpu_to_be32(SHA256_H1),
	cpu_to_be32(SHA256_H2), cpu_to_be32(SHA256_H3),
	cpu_to_be32(SHA256_H4), cpu_to_be32(SHA256_H5),
	cpu_to_be32(SHA256_H6), cpu_to_be32(SHA256_H7),
};

/* The CCP cannot perform zero-length sha operations so the caller
 * is required to buffer data for the final operation. However, a
 * sha operation for a message with a total length of zero is valid
 * so known values are required to supply the result.
 */
static const u8 ccp_sha1_zero[CCP_SHA_CTXSIZE] = {
	0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
	0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
	0xaf, 0xd8, 0x07, 0x09, 0x00, 0x00, 0x00, 0x00,
	0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
};

static const u8 ccp_sha224_zero[CCP_SHA_CTXSIZE] = {
	0xd1, 0x4a, 0x02, 0x8c, 0x2a, 0x3a, 0x2b, 0xc9,
	0x47, 0x61, 0x02, 0xbb, 0x28, 0x82, 0x34, 0xc4,
	0x15, 0xa2, 0xb0, 0x1f, 0x82, 0x8e, 0xa6, 0x2a,
	0xc5, 0xb3, 0xe4, 0x2f, 0x00, 0x00, 0x00, 0x00,
};

static const u8 ccp_sha256_zero[CCP_SHA_CTXSIZE] = {
	0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
	0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
	0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
	0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
};

static u32 ccp_addr_lo(struct ccp_dma_info *info)
{
	return lower_32_bits(info->address + info->offset);
}

static u32 ccp_addr_hi(struct ccp_dma_info *info)
{
	return upper_32_bits(info->address + info->offset) & 0x0000ffff;
}
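
/* Submit one prepared command to the device: write the REQ1..REQx
 * register values, kick off the job via CMD_REQ0 and, when an
 * interrupt-on-complete was requested, wait for the command queue to
 * signal completion (cleaning up queued jobs on error).
 */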
static int ccp_do_cmd(struct ccp_op *op, u32 *cr, unsigned int cr_count)
{
	struct ccp_cmd_queue *cmd_q = op->cmd_q;
	struct ccp_device *ccp = cmd_q->ccp;
	void __iomem *cr_addr;
	u32 cr0, cmd;
	unsigned int i;
	int ret = 0;

	/* We could read a status register to see how many free slots
	 * are actually available, but reading that register resets it
	 * and you could lose some error information.
	 */
	cmd_q->free_slots--;

	cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)
	      | (op->jobid << REQ0_JOBID_SHIFT)
	      | REQ0_WAIT_FOR_WRITE;

	if (op->soc)
		cr0 |= REQ0_STOP_ON_COMPLETE
		       | REQ0_INT_ON_COMPLETE;

	if (op->ioc || !cmd_q->free_slots)
		cr0 |= REQ0_INT_ON_COMPLETE;

	/* Start at CMD_REQ1 */
	cr_addr = ccp->io_regs + CMD_REQ0 + CMD_REQ_INCR;

	mutex_lock(&ccp->req_mutex);

	/* Write CMD_REQ1 through CMD_REQx first */
	for (i = 0; i < cr_count; i++, cr_addr += CMD_REQ_INCR)
		iowrite32(*(cr + i), cr_addr);

	/* Tell the CCP to start */
	wmb();
	iowrite32(cr0, ccp->io_regs + CMD_REQ0);

	mutex_unlock(&ccp->req_mutex);

	if (cr0 & REQ0_INT_ON_COMPLETE) {
		/* Wait for the job to complete */
		ret = wait_event_interruptible(cmd_q->int_queue,
					       cmd_q->int_rcvd);
		if (ret || cmd_q->cmd_error) {
			/* On error delete all related jobs from the queue */
			cmd = (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);

			if (!ret)
				ret = -EIO;
		} else if (op->soc) {
			/* Delete just head job from the queue on SoC */
			cmd = DEL_Q_ACTIVE
			      | (cmd_q->id << DEL_Q_ID_SHIFT)
			      | op->jobid;

			iowrite32(cmd, ccp->io_regs + DEL_CMD_Q_JOB);
		}

		cmd_q->free_slots = CMD_Q_DEPTH(cmd_q->q_status);

		cmd_q->int_rcvd = 0;
	}

	return ret;
}
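
/* The ccp_perform_*() helpers below fill out the six command register
 * values (REQ1 through REQ6) for a single engine operation and hand
 * them to ccp_do_cmd() for submission to the device.
 */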
static int ccp_perform_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_AES << REQ1_ENGINE_SHIFT)
		| (op->u.aes.type << REQ1_AES_TYPE_SHIFT)
		| (op->u.aes.mode << REQ1_AES_MODE_SHIFT)
		| (op->u.aes.action << REQ1_AES_ACTION_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->u.aes.mode == CCP_AES_MODE_CFB)
		cr[0] |= ((0x7f) << REQ1_AES_CFB_SIZE_SHIFT);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_xts_aes(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_XTS_AES_128 << REQ1_ENGINE_SHIFT)
		| (op->u.xts.action << REQ1_AES_ACTION_SHIFT)
		| (op->u.xts.unit_size << REQ1_XTS_AES_SIZE_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT);
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	if (op->eom)
		cr[0] |= REQ1_EOM;

	if (op->init)
		cr[0] |= REQ1_INIT;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_sha(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_SHA << REQ1_ENGINE_SHIFT)
		| (op->u.sha.type << REQ1_SHA_TYPE_SHIFT)
		| REQ1_INIT;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);

	if (op->eom) {
		cr[0] |= REQ1_EOM;
		cr[4] = lower_32_bits(op->u.sha.msg_bits);
		cr[5] = upper_32_bits(op->u.sha.msg_bits);
	} else {
		cr[4] = 0;
		cr[5] = 0;
	}

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_rsa(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_RSA << REQ1_ENGINE_SHIFT)
		| (op->u.rsa.mod_size << REQ1_RSA_MOD_SIZE_SHIFT)
		| (op->ksb_key << REQ1_KEY_KSB_SHIFT)
		| REQ1_EOM;
	cr[1] = op->u.rsa.input_len - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (op->ksb_ctx << REQ4_KSB_SHIFT)
		| (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_passthru(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = (CCP_ENGINE_PASSTHRU << REQ1_ENGINE_SHIFT)
		| (op->u.passthru.bit_mod << REQ1_PT_BW_SHIFT)
		| (op->u.passthru.byte_swap << REQ1_PT_BS_SHIFT);

	if (op->src.type == CCP_MEMTYPE_SYSTEM)
		cr[1] = op->src.u.dma.length - 1;
	else
		cr[1] = op->dst.u.dma.length - 1;

	if (op->src.type == CCP_MEMTYPE_SYSTEM) {
		cr[2] = ccp_addr_lo(&op->src.u.dma);
		cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->src.u.dma);

		if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
			cr[3] |= (op->ksb_key << REQ4_KSB_SHIFT);
	} else {
		cr[2] = op->src.u.ksb * CCP_KSB_BYTES;
		cr[3] = (CCP_MEMTYPE_KSB << REQ4_MEMTYPE_SHIFT);
	}

	if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
		cr[4] = ccp_addr_lo(&op->dst.u.dma);
		cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
			| ccp_addr_hi(&op->dst.u.dma);
	} else {
		cr[4] = op->dst.u.ksb * CCP_KSB_BYTES;
		cr[5] = (CCP_MEMTYPE_KSB << REQ6_MEMTYPE_SHIFT);
	}

	if (op->eom)
		cr[0] |= REQ1_EOM;

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}

static int ccp_perform_ecc(struct ccp_op *op)
{
	u32 cr[6];

	/* Fill out the register contents for REQ1 through REQ6 */
	cr[0] = REQ1_ECC_AFFINE_CONVERT
		| (CCP_ENGINE_ECC << REQ1_ENGINE_SHIFT)
		| (op->u.ecc.function << REQ1_ECC_FUNCTION_SHIFT)
		| REQ1_EOM;
	cr[1] = op->src.u.dma.length - 1;
	cr[2] = ccp_addr_lo(&op->src.u.dma);
	cr[3] = (CCP_MEMTYPE_SYSTEM << REQ4_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->src.u.dma);
	cr[4] = ccp_addr_lo(&op->dst.u.dma);
	cr[5] = (CCP_MEMTYPE_SYSTEM << REQ6_MEMTYPE_SHIFT)
		| ccp_addr_hi(&op->dst.u.dma);

	return ccp_do_cmd(op, cr, ARRAY_SIZE(cr));
}
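
/* Reserve 'count' contiguous entries in the key storage block (KSB),
 * sleeping until entries are freed if none are currently available.
 * Returns the starting KSB entry, or 0 if the wait was interrupted.
 */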
static u32 ccp_alloc_ksb(struct ccp_device *ccp, unsigned int count)
{
	int start;

	for (;;) {
		mutex_lock(&ccp->ksb_mutex);

		start = (u32)bitmap_find_next_zero_area(ccp->ksb,
							ccp->ksb_count,
							ccp->ksb_start,
							count, 0);
		if (start <= ccp->ksb_count) {
			bitmap_set(ccp->ksb, start, count);

			mutex_unlock(&ccp->ksb_mutex);
			break;
		}

		ccp->ksb_avail = 0;

		mutex_unlock(&ccp->ksb_mutex);

		/* Wait for KSB entries to become available */
		if (wait_event_interruptible(ccp->ksb_queue, ccp->ksb_avail))
			return 0;
	}

	return KSB_START + start;
}

static void ccp_free_ksb(struct ccp_device *ccp, unsigned int start,
			 unsigned int count)
{
	if (!start)
		return;

	mutex_lock(&ccp->ksb_mutex);

	bitmap_clear(ccp->ksb, start - KSB_START, count);

	ccp->ksb_avail = 1;

	mutex_unlock(&ccp->ksb_mutex);

	wake_up_interruptible_all(&ccp->ksb_queue);
}

static u32 ccp_gen_jobid(struct ccp_device *ccp)
{
	return atomic_inc_return(&ccp->current_id) & CCP_JOBID_MASK;
}

static void ccp_sg_free(struct ccp_sg_workarea *wa)
{
	if (wa->dma_count)
		dma_unmap_sg(wa->dma_dev, wa->dma_sg, wa->nents, wa->dma_dir);

	wa->dma_count = 0;
}

static int ccp_init_sg_workarea(struct ccp_sg_workarea *wa, struct device *dev,
				struct scatterlist *sg, u64 len,
				enum dma_data_direction dma_dir)
{
	memset(wa, 0, sizeof(*wa));

	wa->sg = sg;
	if (!sg)
		return 0;

	wa->nents = sg_nents_for_len(sg, len);
	if (wa->nents < 0)
		return wa->nents;

	wa->bytes_left = len;
	wa->sg_used = 0;

	if (len == 0)
		return 0;

	if (dma_dir == DMA_NONE)
		return 0;

	wa->dma_sg = sg;
	wa->dma_dev = dev;
	wa->dma_dir = dma_dir;
	wa->dma_count = dma_map_sg(dev, sg, wa->nents, dma_dir);
	if (!wa->dma_count)
		return -ENOMEM;

	return 0;
}

static void ccp_update_sg_workarea(struct ccp_sg_workarea *wa, unsigned int len)
{
	unsigned int nbytes = min_t(u64, len, wa->bytes_left);

	if (!wa->sg)
		return;

	wa->sg_used += nbytes;
	wa->bytes_left -= nbytes;
	if (wa->sg_used == wa->sg->length) {
		wa->sg = sg_next(wa->sg);
		wa->sg_used = 0;
	}
}

static void ccp_dm_free(struct ccp_dm_workarea *wa)
{
	if (wa->length <= CCP_DMAPOOL_MAX_SIZE) {
		if (wa->address)
			dma_pool_free(wa->dma_pool, wa->address,
				      wa->dma.address);
	} else {
		if (wa->dma.address)
			dma_unmap_single(wa->dev, wa->dma.address, wa->length,
					 wa->dma.dir);
		kfree(wa->address);
	}

	wa->address = NULL;
	wa->dma.address = 0;
}
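
/* Allocate a DMA-mapped scratch buffer for a command queue. Small
 * requests come from the queue's DMA pool; larger ones are kzalloc'd
 * and mapped with dma_map_single().
 */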
static int ccp_init_dm_workarea(struct ccp_dm_workarea *wa,
				struct ccp_cmd_queue *cmd_q,
				unsigned int len,
				enum dma_data_direction dir)
{
	memset(wa, 0, sizeof(*wa));

	if (!len)
		return 0;

	wa->dev = cmd_q->ccp->dev;
	wa->length = len;

	if (len <= CCP_DMAPOOL_MAX_SIZE) {
		wa->dma_pool = cmd_q->dma_pool;

		wa->address = dma_pool_alloc(wa->dma_pool, GFP_KERNEL,
					     &wa->dma.address);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.length = CCP_DMAPOOL_MAX_SIZE;

		memset(wa->address, 0, CCP_DMAPOOL_MAX_SIZE);
	} else {
		wa->address = kzalloc(len, GFP_KERNEL);
		if (!wa->address)
			return -ENOMEM;

		wa->dma.address = dma_map_single(wa->dev, wa->address, len,
						 dir);
		if (!wa->dma.address)
			return -ENOMEM;

		wa->dma.length = len;
	}
	wa->dma.dir = dir;

	return 0;
}

static void ccp_set_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 0);
}

static void ccp_get_dm_area(struct ccp_dm_workarea *wa, unsigned int wa_offset,
			    struct scatterlist *sg, unsigned int sg_offset,
			    unsigned int len)
{
	WARN_ON(!wa->address);

	scatterwalk_map_and_copy(wa->address + wa_offset, sg, sg_offset, len,
				 1);
}
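
/* Copy data from a scatterlist into the workarea with the byte order
 * reversed, se_len bytes at a time, optionally sign-extending each
 * chunk. Used to present big endian operands to the device in the
 * little endian form it expects.
 */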
static int ccp_reverse_set_dm_area(struct ccp_dm_workarea *wa,
				   struct scatterlist *sg,
				   unsigned int len, unsigned int se_len,
				   bool sign_extend)
{
	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	if (WARN_ON(se_len > sizeof(buffer)))
		return -EINVAL;

	sg_offset = len;
	dm_offset = 0;
	nbytes = len;
	while (nbytes) {
		ksb_len = min_t(unsigned int, nbytes, se_len);
		sg_offset -= ksb_len;

		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 0);
		for (i = 0; i < ksb_len; i++)
			wa->address[dm_offset + i] = buffer[ksb_len - i - 1];

		dm_offset += ksb_len;
		nbytes -= ksb_len;

		if ((ksb_len != se_len) && sign_extend) {
			/* Must sign-extend to nearest sign-extend length */
			if (wa->address[dm_offset - 1] & 0x80)
				memset(wa->address + dm_offset, 0xff,
				       se_len - ksb_len);
		}
	}

	return 0;
}

static void ccp_reverse_get_dm_area(struct ccp_dm_workarea *wa,
				    struct scatterlist *sg,
				    unsigned int len)
{
	unsigned int nbytes, sg_offset, dm_offset, ksb_len, i;
	u8 buffer[CCP_REVERSE_BUF_SIZE];

	sg_offset = 0;
	dm_offset = len;
	nbytes = len;
	while (nbytes) {
		ksb_len = min_t(unsigned int, nbytes, sizeof(buffer));
		dm_offset -= ksb_len;

		for (i = 0; i < ksb_len; i++)
			buffer[ksb_len - i - 1] = wa->address[dm_offset + i];
		scatterwalk_map_and_copy(buffer, sg, sg_offset, ksb_len, 1);

		sg_offset += ksb_len;
		nbytes -= ksb_len;
	}
}

static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)
{
	ccp_dm_free(&data->dm_wa);
	ccp_sg_free(&data->sg_wa);
}

static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,
			 struct scatterlist *sg, u64 sg_len,
			 unsigned int dm_len,
			 enum dma_data_direction dir)
{
	int ret;

	memset(data, 0, sizeof(*data));

	ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,
				   dir);
	if (ret)
		goto e_err;

	ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);
	if (ret)
		goto e_err;

	return 0;

e_err:
	ccp_free_data(data, cmd_q);

	return ret;
}
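
/* Buffer as much remaining scatterlist data as fits in the DMA
 * workarea (when 'from' is zero) or copy the workarea back out to the
 * scatterlist (when 'from' is set), returning the byte count handled.
 */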
static unsigned int ccp_queue_buf(struct ccp_data *data, unsigned int from)
{
	struct ccp_sg_workarea *sg_wa = &data->sg_wa;
	struct ccp_dm_workarea *dm_wa = &data->dm_wa;
	unsigned int buf_count, nbytes;

	/* Clear the buffer if setting it */
	if (!from)
		memset(dm_wa->address, 0, dm_wa->length);

	if (!sg_wa->sg)
		return 0;

	/* Perform the copy operation
	 *   nbytes will always be <= UINT_MAX because dm_wa->length is
	 *   an unsigned int
	 */
	nbytes = min_t(u64, sg_wa->bytes_left, dm_wa->length);
	scatterwalk_map_and_copy(dm_wa->address, sg_wa->sg, sg_wa->sg_used,
				 nbytes, from);

	/* Update the structures and generate the count */
	buf_count = 0;
	while (sg_wa->bytes_left && (buf_count < dm_wa->length)) {
		nbytes = min(sg_wa->sg->length - sg_wa->sg_used,
			     dm_wa->length - buf_count);
		nbytes = min_t(u64, sg_wa->bytes_left, nbytes);

		buf_count += nbytes;
		ccp_update_sg_workarea(sg_wa, nbytes);
	}

	return buf_count;
}

static unsigned int ccp_fill_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 0);
}

static unsigned int ccp_empty_queue_buf(struct ccp_data *data)
{
	return ccp_queue_buf(data, 1);
}
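
/* Select the source and destination DMA addresses and length for the
 * next device operation, falling back to the bounce buffer whenever a
 * scatterlist entry cannot supply or receive a full block on its own.
 */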
static void ccp_prepare_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op, unsigned int block_size,
			     bool blocksize_op)
{
	unsigned int sg_src_len, sg_dst_len, op_len;

	/* The CCP can only DMA from/to one address each per operation. This
	 * requires that we find the smallest DMA area between the source
	 * and destination. The resulting len values will always be <= UINT_MAX
	 * because the dma length is an unsigned int.
	 */
	sg_src_len = sg_dma_len(src->sg_wa.sg) - src->sg_wa.sg_used;
	sg_src_len = min_t(u64, src->sg_wa.bytes_left, sg_src_len);

	if (dst) {
		sg_dst_len = sg_dma_len(dst->sg_wa.sg) - dst->sg_wa.sg_used;
		sg_dst_len = min_t(u64, src->sg_wa.bytes_left, sg_dst_len);
		op_len = min(sg_src_len, sg_dst_len);
	} else {
		op_len = sg_src_len;
	}

	/* The data operation length will be at least block_size in length
	 * or the smaller of available sg room remaining for the source or
	 * the destination
	 */
	op_len = max(op_len, block_size);

	/* Unless we have to buffer data, there's no reason to wait */
	op->soc = 0;

	if (sg_src_len < block_size) {
		/* Not enough data in the sg element, so it
		 * needs to be buffered into a blocksize chunk
		 */
		int cp_len = ccp_fill_queue_buf(src);

		op->soc = 1;
		op->src.u.dma.address = src->dm_wa.dma.address;
		op->src.u.dma.offset = 0;
		op->src.u.dma.length = (blocksize_op) ? block_size : cp_len;
	} else {
		/* Enough data in the sg element, but we need to
		 * adjust for any previously copied data
		 */
		op->src.u.dma.address = sg_dma_address(src->sg_wa.sg);
		op->src.u.dma.offset = src->sg_wa.sg_used;
		op->src.u.dma.length = op_len & ~(block_size - 1);

		ccp_update_sg_workarea(&src->sg_wa, op->src.u.dma.length);
	}

	if (dst) {
		if (sg_dst_len < block_size) {
			/* Not enough room in the sg element or we're on the
			 * last piece of data (when using padding), so the
			 * output needs to be buffered into a blocksize chunk
			 */
			op->soc = 1;
			op->dst.u.dma.address = dst->dm_wa.dma.address;
			op->dst.u.dma.offset = 0;
			op->dst.u.dma.length = op->src.u.dma.length;
		} else {
			/* Enough room in the sg element, but we need to
			 * adjust for any previously used area
			 */
			op->dst.u.dma.address = sg_dma_address(dst->sg_wa.sg);
			op->dst.u.dma.offset = dst->sg_wa.sg_used;
			op->dst.u.dma.length = op->src.u.dma.length;
		}
	}
}

static void ccp_process_data(struct ccp_data *src, struct ccp_data *dst,
			     struct ccp_op *op)
{
	op->init = 0;

	if (dst) {
		if (op->dst.u.dma.address == dst->dm_wa.dma.address)
			ccp_empty_queue_buf(dst);
		else
			ccp_update_sg_workarea(&dst->sg_wa,
					       op->dst.u.dma.length);
	}
}
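
/* Move the contents of a DMA workarea into a KSB entry, or read a KSB
 * entry back into the workarea, using a passthru operation with the
 * requested byte swap.
 */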
static int ccp_copy_to_from_ksb(struct ccp_cmd_queue *cmd_q,
				struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
				u32 byte_swap, bool from)
{
	struct ccp_op op;

	memset(&op, 0, sizeof(op));

	op.cmd_q = cmd_q;
	op.jobid = jobid;
	op.eom = 1;

	if (from) {
		op.soc = 1;
		op.src.type = CCP_MEMTYPE_KSB;
		op.src.u.ksb = ksb;
		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = wa->dma.address;
		op.dst.u.dma.length = wa->length;
	} else {
		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = wa->dma.address;
		op.src.u.dma.length = wa->length;
		op.dst.type = CCP_MEMTYPE_KSB;
		op.dst.u.ksb = ksb;
	}

	op.u.passthru.byte_swap = byte_swap;

	return ccp_perform_passthru(&op);
}

static int ccp_copy_to_ksb(struct ccp_cmd_queue *cmd_q,
			   struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
			   u32 byte_swap)
{
	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, false);
}

static int ccp_copy_from_ksb(struct ccp_cmd_queue *cmd_q,
			     struct ccp_dm_workarea *wa, u32 jobid, u32 ksb,
			     u32 byte_swap)
{
	return ccp_copy_to_from_ksb(cmd_q, wa, jobid, ksb, byte_swap, true);
}
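
/* Run an AES CMAC operation: load the key and IV into the KSB, feed
 * the source data to the AES engine block by block (pushing the K1/K2
 * key before the final block), then read the updated context back
 * from the context KSB entry.
 */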
static int ccp_run_aes_cmac_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src;
	struct ccp_op op;
	unsigned int dm_offset;
	int ret;

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (aes->src_len & (AES_BLOCK_SIZE - 1))
		return -EINVAL;

	if (aes->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!aes->key || !aes->iv || !aes->src)
		return -EINVAL;

	if (aes->cmac_final) {
		if (aes->cmac_key_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->cmac_key)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP AES engine */
	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, AES_BLOCK_SIZE, true);
		if (aes->cmac_final && !src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Push the K1/K2 key to the CCP now */
			ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid,
						op.ksb_ctx,
						CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}

			ccp_set_dm_area(&ctx, 0, aes->cmac_key, 0,
					aes->cmac_key_len);
			ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
					      CCP_PASSTHRU_BYTESWAP_256BIT);
			if (ret) {
				cmd->engine_error = cmd_q->cmd_error;
				goto e_src;
			}
		}

		ret = ccp_perform_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_src;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_src;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
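
/* Run a non-CMAC AES operation (CMAC requests are handed off to
 * ccp_run_aes_cmac_cmd()). The key and, for modes other than ECB, the
 * IV are loaded into the KSB before the data is streamed through the
 * AES engine; the updated IV is read back afterwards.
 */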
static int ccp_run_aes_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_aes_engine *aes = &cmd->u.aes;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int dm_offset;
	bool in_place = false;
	int ret;

	if (aes->mode == CCP_AES_MODE_CMAC)
		return ccp_run_aes_cmac_cmd(cmd_q, cmd);

	if (!((aes->key_len == AES_KEYSIZE_128) ||
	      (aes->key_len == AES_KEYSIZE_192) ||
	      (aes->key_len == AES_KEYSIZE_256)))
		return -EINVAL;

	if (((aes->mode == CCP_AES_MODE_ECB) ||
	     (aes->mode == CCP_AES_MODE_CBC) ||
	     (aes->mode == CCP_AES_MODE_CFB)) &&
	    (aes->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (!aes->key || !aes->src || !aes->dst)
		return -EINVAL;

	if (aes->mode != CCP_AES_MODE_ECB) {
		if (aes->iv_len != AES_BLOCK_SIZE)
			return -EINVAL;

		if (!aes->iv)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_AES_CTX_KSB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = (aes->mode == CCP_AES_MODE_ECB) ? 0 : 1;
	op.u.aes.type = aes->type;
	op.u.aes.mode = aes->mode;
	op.u.aes.action = aes->action;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - aes->key_len;
	ccp_set_dm_area(&key, dm_offset, aes->key, 0, aes->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Load the AES context - convert to LE */
		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
		ccp_set_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
		ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				      CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_ctx;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(aes->src) == sg_virt(aes->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, aes->src, aes->src_len,
			    AES_BLOCK_SIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, aes->dst, aes->src_len,
				    AES_BLOCK_SIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, AES_BLOCK_SIZE, true);
		if (!src.sg_wa.bytes_left) {
			op.eom = 1;

			/* Since we don't retrieve the AES context in ECB
			 * mode we have to wait for the operation to complete
			 * on the last piece of data
			 */
			if (aes->mode == CCP_AES_MODE_ECB)
				op.soc = 1;
		}

		ret = ccp_perform_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	if (aes->mode != CCP_AES_MODE_ECB) {
		/* Retrieve the AES context - convert from LE to BE using
		 * 32-byte (256-bit) byteswapping
		 */
		ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
					CCP_PASSTHRU_BYTESWAP_256BIT);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		/* ...but we only need AES_BLOCK_SIZE bytes */
		dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
		ccp_get_dm_area(&ctx, dm_offset, aes->iv, 0, aes->iv_len);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
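
/* Run an XTS-AES operation: the key material is packed into a single
 * KSB entry, the tweak (IV) is loaded into the context entry without
 * any byte swap, and the data is processed in unit_size chunks.
 */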
static int ccp_run_xts_aes_cmd(struct ccp_cmd_queue *cmd_q,
			       struct ccp_cmd *cmd)
{
	struct ccp_xts_aes_engine *xts = &cmd->u.xts;
	struct ccp_dm_workarea key, ctx;
	struct ccp_data src, dst;
	struct ccp_op op;
	unsigned int unit_size, dm_offset;
	bool in_place = false;
	int ret;

	switch (xts->unit_size) {
	case CCP_XTS_AES_UNIT_SIZE_16:
		unit_size = 16;
		break;
	case CCP_XTS_AES_UNIT_SIZE_512:
		unit_size = 512;
		break;
	case CCP_XTS_AES_UNIT_SIZE_1024:
		unit_size = 1024;
		break;
	case CCP_XTS_AES_UNIT_SIZE_2048:
		unit_size = 2048;
		break;
	case CCP_XTS_AES_UNIT_SIZE_4096:
		unit_size = 4096;
		break;
	default:
		return -EINVAL;
	}

	if (xts->key_len != AES_KEYSIZE_128)
		return -EINVAL;

	if (!xts->final && (xts->src_len & (AES_BLOCK_SIZE - 1)))
		return -EINVAL;

	if (xts->iv_len != AES_BLOCK_SIZE)
		return -EINVAL;

	if (!xts->key || !xts->iv || !xts->src || !xts->dst)
		return -EINVAL;

	BUILD_BUG_ON(CCP_XTS_AES_KEY_KSB_COUNT != 1);
	BUILD_BUG_ON(CCP_XTS_AES_CTX_KSB_COUNT != 1);

	ret = -EIO;
	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = cmd_q->ksb_key;
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.init = 1;
	op.u.xts.action = xts->action;
	op.u.xts.unit_size = xts->unit_size;

	/* All supported key sizes fit in a single (32-byte) KSB entry
	 * and must be in little endian format. Use the 256-bit byte
	 * swap passthru option to convert from big endian to little
	 * endian.
	 */
	ret = ccp_init_dm_workarea(&key, cmd_q,
				   CCP_XTS_AES_KEY_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	dm_offset = CCP_KSB_BYTES - AES_KEYSIZE_128;
	ccp_set_dm_area(&key, dm_offset, xts->key, 0, xts->key_len);
	ccp_set_dm_area(&key, 0, xts->key, dm_offset, xts->key_len);
	ret = ccp_copy_to_ksb(cmd_q, &key, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_key;
	}

	/* The AES context fits in a single (32-byte) KSB entry and
	 * for XTS is already in little endian format so no byte swapping
	 * is needed.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_XTS_AES_CTX_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		goto e_key;

	ccp_set_dm_area(&ctx, 0, xts->iv, 0, xts->iv_len);
	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
	if (sg_virt(xts->src) == sg_virt(xts->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, xts->src, xts->src_len,
			    unit_size,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, xts->dst, xts->src_len,
				    unit_size, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP AES engine */
	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, &dst, &op, unit_size, true);
		if (!src.sg_wa.bytes_left)
			op.eom = 1;

		ret = ccp_perform_xts_aes(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		ccp_process_data(&src, &dst, &op);
	}

	/* Retrieve the AES context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	/* ...but we only need AES_BLOCK_SIZE bytes */
	dm_offset = CCP_KSB_BYTES - AES_BLOCK_SIZE;
	ccp_get_dm_area(&ctx, dm_offset, xts->iv, 0, xts->iv_len);

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

e_key:
	ccp_dm_free(&key);

	return ret;
}
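
/* Run a SHA operation. Zero-length messages are answered with the
 * precomputed digests defined above; otherwise the context is loaded
 * into the KSB, the data is hashed block by block, and the context is
 * read back. When an opad buffer is supplied, a second SHA pass is
 * run recursively to finish the HMAC computation.
 */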
static int ccp_run_sha_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_sha_engine *sha = &cmd->u.sha;
	struct ccp_dm_workarea ctx;
	struct ccp_data src;
	struct ccp_op op;
	int ret;

	if (sha->ctx_len != CCP_SHA_CTXSIZE)
		return -EINVAL;

	if (!sha->ctx)
		return -EINVAL;

	if (!sha->final && (sha->src_len & (CCP_SHA_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!sha->src_len) {
		const u8 *sha_zero;

		/* Not final, just return */
		if (!sha->final)
			return 0;

		/* CCP can't do a zero length sha operation so the
		 * caller must buffer the data.
		 */
		if (sha->msg_bits)
			return -EINVAL;

		/* A sha operation for a message with a total length of zero,
		 * return known result.
		 */
		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			sha_zero = ccp_sha1_zero;
			break;
		case CCP_SHA_TYPE_224:
			sha_zero = ccp_sha224_zero;
			break;
		case CCP_SHA_TYPE_256:
			sha_zero = ccp_sha256_zero;
			break;
		default:
			return -EINVAL;
		}

		scatterwalk_map_and_copy((void *)sha_zero, sha->ctx, 0,
					 sha->ctx_len, 1);

		return 0;
	}

	if (!sha->src)
		return -EINVAL;

	BUILD_BUG_ON(CCP_SHA_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_ctx = cmd_q->ksb_ctx;
	op.u.sha.type = sha->type;
	op.u.sha.msg_bits = sha->msg_bits;

	/* The SHA context fits in a single (32-byte) KSB entry and
	 * must be in little endian format. Use the 256-bit byte swap
	 * passthru option to convert from big endian to little endian.
	 */
	ret = ccp_init_dm_workarea(&ctx, cmd_q,
				   CCP_SHA_KSB_COUNT * CCP_KSB_BYTES,
				   DMA_BIDIRECTIONAL);
	if (ret)
		return ret;

	if (sha->first) {
		const __be32 *init;

		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			init = ccp_sha1_init;
			break;
		case CCP_SHA_TYPE_224:
			init = ccp_sha224_init;
			break;
		case CCP_SHA_TYPE_256:
			init = ccp_sha256_init;
			break;
		default:
			ret = -EINVAL;
			goto e_ctx;
		}
		memcpy(ctx.address, init, CCP_SHA_CTXSIZE);
	} else {
		ccp_set_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);
	}

	ret = ccp_copy_to_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
			      CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_ctx;
	}

	/* Send data to the CCP SHA engine */
	ret = ccp_init_data(&src, cmd_q, sha->src, sha->src_len,
			    CCP_SHA_BLOCKSIZE, DMA_TO_DEVICE);
	if (ret)
		goto e_ctx;

	while (src.sg_wa.bytes_left) {
		ccp_prepare_data(&src, NULL, &op, CCP_SHA_BLOCKSIZE, false);
		if (sha->final && !src.sg_wa.bytes_left)
			op.eom = 1;

		ret = ccp_perform_sha(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_data;
		}

		ccp_process_data(&src, NULL, &op);
	}

	/* Retrieve the SHA context - convert from LE to BE using
	 * 32-byte (256-bit) byteswapping
	 */
	ret = ccp_copy_from_ksb(cmd_q, &ctx, op.jobid, op.ksb_ctx,
				CCP_PASSTHRU_BYTESWAP_256BIT);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_data;
	}

	ccp_get_dm_area(&ctx, 0, sha->ctx, 0, sha->ctx_len);

	if (sha->final && sha->opad) {
		/* HMAC operation, recursively perform final SHA */
		struct ccp_cmd hmac_cmd;
		struct scatterlist sg;
		u64 block_size, digest_size;
		u8 *hmac_buf;

		switch (sha->type) {
		case CCP_SHA_TYPE_1:
			block_size = SHA1_BLOCK_SIZE;
			digest_size = SHA1_DIGEST_SIZE;
			break;
		case CCP_SHA_TYPE_224:
			block_size = SHA224_BLOCK_SIZE;
			digest_size = SHA224_DIGEST_SIZE;
			break;
		case CCP_SHA_TYPE_256:
			block_size = SHA256_BLOCK_SIZE;
			digest_size = SHA256_DIGEST_SIZE;
			break;
		default:
			ret = -EINVAL;
			goto e_data;
		}

		if (sha->opad_len != block_size) {
			ret = -EINVAL;
			goto e_data;
		}

		hmac_buf = kmalloc(block_size + digest_size, GFP_KERNEL);
		if (!hmac_buf) {
			ret = -ENOMEM;
			goto e_data;
		}
		sg_init_one(&sg, hmac_buf, block_size + digest_size);

		scatterwalk_map_and_copy(hmac_buf, sha->opad, 0, block_size, 0);
		memcpy(hmac_buf + block_size, ctx.address, digest_size);

		memset(&hmac_cmd, 0, sizeof(hmac_cmd));
		hmac_cmd.engine = CCP_ENGINE_SHA;
		hmac_cmd.u.sha.type = sha->type;
		hmac_cmd.u.sha.ctx = sha->ctx;
		hmac_cmd.u.sha.ctx_len = sha->ctx_len;
		hmac_cmd.u.sha.src = &sg;
		hmac_cmd.u.sha.src_len = block_size + digest_size;
		hmac_cmd.u.sha.opad = NULL;
		hmac_cmd.u.sha.opad_len = 0;
		hmac_cmd.u.sha.first = 1;
		hmac_cmd.u.sha.final = 1;
		hmac_cmd.u.sha.msg_bits = (block_size + digest_size) << 3;

		ret = ccp_run_sha_cmd(cmd_q, &hmac_cmd);
		if (ret)
			cmd->engine_error = hmac_cmd.engine_error;

		kfree(hmac_buf);
	}

e_data:
	ccp_free_data(&src, cmd_q);

e_ctx:
	ccp_dm_free(&ctx);

	return ret;
}
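
/* Run an RSA operation. The exponent is loaded into dynamically
 * allocated KSB entries, the modulus and message are concatenated into
 * one little endian input buffer, and the result is reverse-copied
 * back into the caller's destination scatterlist.
 */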
static int ccp_run_rsa_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_rsa_engine *rsa = &cmd->u.rsa;
	struct ccp_dm_workarea exp, src;
	struct ccp_data dst;
	struct ccp_op op;
	unsigned int ksb_count, i_len, o_len;
	int ret;

	if (rsa->key_size > CCP_RSA_MAX_WIDTH)
		return -EINVAL;

	if (!rsa->exp || !rsa->mod || !rsa->src || !rsa->dst)
		return -EINVAL;

	/* The RSA modulus must precede the message being acted upon, so
	 * it must be copied to a DMA area where the message and the
	 * modulus can be concatenated. Therefore the input buffer
	 * length required is twice the output buffer length (which
	 * must be a multiple of 256 bits).
	 */
	o_len = ((rsa->key_size + 255) / 256) * 32;
	i_len = o_len * 2;

	ksb_count = o_len / CCP_KSB_BYTES;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);
	op.ksb_key = ccp_alloc_ksb(cmd_q->ccp, ksb_count);
	if (!op.ksb_key)
		return -EIO;

	/* The RSA exponent may span multiple (32-byte) KSB entries and must
	 * be in little endian format. Reverse copy each 32-byte chunk
	 * of the exponent (En chunk to E0 chunk, E(n-1) chunk to E1 chunk)
	 * and each byte within that chunk and do not perform any byte swap
	 * operations on the passthru operation.
	 */
	ret = ccp_init_dm_workarea(&exp, cmd_q, o_len, DMA_TO_DEVICE);
	if (ret)
		goto e_ksb;

	ret = ccp_reverse_set_dm_area(&exp, rsa->exp, rsa->exp_len,
				      CCP_KSB_BYTES, false);
	if (ret)
		goto e_exp;
	ret = ccp_copy_to_ksb(cmd_q, &exp, op.jobid, op.ksb_key,
			      CCP_PASSTHRU_BYTESWAP_NOOP);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_exp;
	}

	/* Concatenate the modulus and the message. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted.
	 */
	ret = ccp_init_dm_workarea(&src, cmd_q, i_len, DMA_TO_DEVICE);
	if (ret)
		goto e_exp;

	ret = ccp_reverse_set_dm_area(&src, rsa->mod, rsa->mod_len,
				      CCP_KSB_BYTES, false);
	if (ret)
		goto e_src;
	src.address += o_len;	/* Adjust the address for the copy operation */
	ret = ccp_reverse_set_dm_area(&src, rsa->src, rsa->src_len,
				      CCP_KSB_BYTES, false);
	if (ret)
		goto e_src;
	src.address -= o_len;	/* Reset the address to original value */

	/* Prepare the output area for the operation */
	ret = ccp_init_data(&dst, cmd_q, rsa->dst, rsa->mod_len,
			    o_len, DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = i_len;
	op.dst.u.dma.address = dst.dm_wa.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = o_len;

	op.u.rsa.mod_size = rsa->key_size;
	op.u.rsa.input_len = i_len;

	ret = ccp_perform_rsa(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ccp_reverse_get_dm_area(&dst.dm_wa, rsa->dst, rsa->mod_len);

e_dst:
	ccp_free_data(&dst, cmd_q);

e_src:
	ccp_dm_free(&src);

e_exp:
	ccp_dm_free(&exp);

e_ksb:
	ccp_free_ksb(cmd_q->ccp, op.ksb_key, ksb_count);

	return ret;
}
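
/* Run a passthru (copy/bitwise) operation. An optional mask is first
 * loaded into the KSB; the source scatterlist is then copied entry by
 * entry to the destination scatterlist.
 */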
static int ccp_run_passthru_cmd(struct ccp_cmd_queue *cmd_q,
				struct ccp_cmd *cmd)
{
	struct ccp_passthru_engine *pt = &cmd->u.passthru;
	struct ccp_dm_workarea mask;
	struct ccp_data src, dst;
	struct ccp_op op;
	bool in_place = false;
	unsigned int i;
	int ret;

	if (!pt->final && (pt->src_len & (CCP_PASSTHRU_BLOCKSIZE - 1)))
		return -EINVAL;

	if (!pt->src || !pt->dst)
		return -EINVAL;

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		if (pt->mask_len != CCP_PASSTHRU_MASKSIZE)
			return -EINVAL;
		if (!pt->mask)
			return -EINVAL;
	}

	BUILD_BUG_ON(CCP_PASSTHRU_KSB_COUNT != 1);

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP) {
		/* Load the mask */
		op.ksb_key = cmd_q->ksb_key;

		ret = ccp_init_dm_workarea(&mask, cmd_q,
					   CCP_PASSTHRU_KSB_COUNT *
					   CCP_KSB_BYTES,
					   DMA_TO_DEVICE);
		if (ret)
			return ret;

		ccp_set_dm_area(&mask, 0, pt->mask, 0, pt->mask_len);
		ret = ccp_copy_to_ksb(cmd_q, &mask, op.jobid, op.ksb_key,
				      CCP_PASSTHRU_BYTESWAP_NOOP);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_mask;
		}
	}

	/* Prepare the input and output data workareas. For in-place
	 * operations we need to set the dma direction to BIDIRECTIONAL
	 * and copy the src workarea to the dst workarea.
	 */
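	/* In-place is detected by comparing the virtual addresses of the
	 * first src and dst scatterlist entries; in that case a single
	 * bidirectional mapping is used and dst simply aliases src.
	 */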
	if (sg_virt(pt->src) == sg_virt(pt->dst))
		in_place = true;

	ret = ccp_init_data(&src, cmd_q, pt->src, pt->src_len,
			    CCP_PASSTHRU_MASKSIZE,
			    in_place ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE);
	if (ret)
		goto e_mask;

	if (in_place) {
		dst = src;
	} else {
		ret = ccp_init_data(&dst, cmd_q, pt->dst, pt->src_len,
				    CCP_PASSTHRU_MASKSIZE, DMA_FROM_DEVICE);
		if (ret)
			goto e_src;
	}

	/* Send data to the CCP Passthru engine
	 *   Because the CCP engine works on a single source and destination
	 *   dma address at a time, the length of each entry in the source
	 *   scatterlist (after the dma_map_sg call) must be less than or
	 *   equal to the (remaining) length of the destination scatterlist
	 *   entry, and that length must be a multiple of
	 *   CCP_PASSTHRU_BLOCKSIZE.
	 */
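	/* Each src entry is copied at offset sg_used within the current dst
	 * entry; once the dst entry has been exactly filled the walk
	 * advances to the next dst entry.
	 */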
	dst.sg_wa.sg_used = 0;
	for (i = 1; i <= src.sg_wa.dma_count; i++) {
		if (!dst.sg_wa.sg ||
		    (dst.sg_wa.sg->length < src.sg_wa.sg->length)) {
			ret = -EINVAL;
			goto e_dst;
		}

		if (i == src.sg_wa.dma_count) {
			op.eom = 1;
			op.soc = 1;
		}

		op.src.type = CCP_MEMTYPE_SYSTEM;
		op.src.u.dma.address = sg_dma_address(src.sg_wa.sg);
		op.src.u.dma.offset = 0;
		op.src.u.dma.length = sg_dma_len(src.sg_wa.sg);

		op.dst.type = CCP_MEMTYPE_SYSTEM;
		op.dst.u.dma.address = sg_dma_address(dst.sg_wa.sg);
		op.dst.u.dma.offset = dst.sg_wa.sg_used;
		op.dst.u.dma.length = op.src.u.dma.length;

		ret = ccp_perform_passthru(&op);
		if (ret) {
			cmd->engine_error = cmd_q->cmd_error;
			goto e_dst;
		}

		dst.sg_wa.sg_used += src.sg_wa.sg->length;
		if (dst.sg_wa.sg_used == dst.sg_wa.sg->length) {
			dst.sg_wa.sg = sg_next(dst.sg_wa.sg);
			dst.sg_wa.sg_used = 0;
		}
		src.sg_wa.sg = sg_next(src.sg_wa.sg);
	}

e_dst:
	if (!in_place)
		ccp_free_data(&dst, cmd_q);

e_src:
	ccp_free_data(&src, cmd_q);

e_mask:
	if (pt->bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
		ccp_dm_free(&mask);

	return ret;
}

static int ccp_run_ecc_mm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.mm.operand_1 ||
	    (ecc->u.mm.operand_1_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT)
		if (!ecc->u.mm.operand_2 ||
		    (ecc->u.mm.operand_2_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

	if (!ecc->u.mm.result ||
	    (ecc->u.mm.result_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
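	/* Resulting src layout, one CCP_ECC_OPERAND_SIZE slot per value,
	 * with each value reversed into little endian within its slot:
	 *
	 *   slot 0: modulus
	 *   slot 1: operand 1
	 *   slot 2: operand 2 (omitted for the MINV function)
	 */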
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first operand */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_1,
				      ecc->u.mm.operand_1_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function != CCP_ECC_FUNCTION_MINV_384BIT) {
		/* Copy the second operand */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.mm.operand_2,
					      ecc->u.mm.operand_2_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = ccp_perform_ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

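	/* Check the status word the ECC engine wrote into the output area */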
	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

	/* Save the ECC result */
	ccp_reverse_get_dm_area(&dst, ecc->u.mm.result, CCP_ECC_MODULUS_BYTES);

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_pm_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;
	struct ccp_dm_workarea src, dst;
	struct ccp_op op;
	int ret;
	u8 *save;

	if (!ecc->u.pm.point_1.x ||
	    (ecc->u.pm.point_1.x_len > CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.point_1.y ||
	    (ecc->u.pm.point_1.y_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		if (!ecc->u.pm.point_2.x ||
		    (ecc->u.pm.point_2.x_len > CCP_ECC_MODULUS_BYTES) ||
		    !ecc->u.pm.point_2.y ||
		    (ecc->u.pm.point_2.y_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;
	} else {
		if (!ecc->u.pm.domain_a ||
		    (ecc->u.pm.domain_a_len > CCP_ECC_MODULUS_BYTES))
			return -EINVAL;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT)
			if (!ecc->u.pm.scalar ||
			    (ecc->u.pm.scalar_len > CCP_ECC_MODULUS_BYTES))
				return -EINVAL;
	}

	if (!ecc->u.pm.result.x ||
	    (ecc->u.pm.result.x_len < CCP_ECC_MODULUS_BYTES) ||
	    !ecc->u.pm.result.y ||
	    (ecc->u.pm.result.y_len < CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	memset(&op, 0, sizeof(op));
	op.cmd_q = cmd_q;
	op.jobid = ccp_gen_jobid(cmd_q->ccp);

	/* Concatenate the modulus and the operands. Both the modulus and
	 * the operands must be in little endian format. Since the input
	 * is in big endian format it must be converted and placed in a
	 * fixed length buffer.
	 */
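	/* Resulting src layout, one CCP_ECC_OPERAND_SIZE slot per value,
	 * with each value reversed into little endian within its slot:
	 *
	 *   slot 0: modulus
	 *   slot 1: point 1 X
	 *   slot 2: point 1 Y
	 *   slot 3: point 1 Z (set to 1)
	 *   then for PADD:  point 2 X, point 2 Y, point 2 Z (set to 1)
	 *   otherwise:      domain "a" parameter, then the scalar for PMUL
	 */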
	ret = ccp_init_dm_workarea(&src, cmd_q, CCP_ECC_SRC_BUF_SIZE,
				   DMA_TO_DEVICE);
	if (ret)
		return ret;

	/* Save the workarea address since it is updated in order to perform
	 * the concatenation
	 */
	save = src.address;

	/* Copy the ECC modulus */
	ret = ccp_reverse_set_dm_area(&src, ecc->mod, ecc->mod_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Copy the first point X and Y coordinate */
	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.x,
				      ecc->u.pm.point_1.x_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_1.y,
				      ecc->u.pm.point_1.y_len,
				      CCP_ECC_OPERAND_SIZE, false);
	if (ret)
		goto e_src;
	src.address += CCP_ECC_OPERAND_SIZE;

	/* Set the first point Z coordinate to 1 */
	*src.address = 0x01;
	src.address += CCP_ECC_OPERAND_SIZE;

	if (ecc->function == CCP_ECC_FUNCTION_PADD_384BIT) {
		/* Copy the second point X and Y coordinate */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.x,
					      ecc->u.pm.point_2.x_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.point_2.y,
					      ecc->u.pm.point_2.y_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		/* Set the second point Z coordinate to 1 */
		*src.address = 0x01;
		src.address += CCP_ECC_OPERAND_SIZE;
	} else {
		/* Copy the Domain "a" parameter */
		ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.domain_a,
					      ecc->u.pm.domain_a_len,
					      CCP_ECC_OPERAND_SIZE, false);
		if (ret)
			goto e_src;
		src.address += CCP_ECC_OPERAND_SIZE;

		if (ecc->function == CCP_ECC_FUNCTION_PMUL_384BIT) {
			/* Copy the scalar value */
			ret = ccp_reverse_set_dm_area(&src, ecc->u.pm.scalar,
						      ecc->u.pm.scalar_len,
						      CCP_ECC_OPERAND_SIZE,
						      false);
			if (ret)
				goto e_src;
			src.address += CCP_ECC_OPERAND_SIZE;
		}
	}

	/* Restore the workarea address */
	src.address = save;

	/* Prepare the output area for the operation */
	ret = ccp_init_dm_workarea(&dst, cmd_q, CCP_ECC_DST_BUF_SIZE,
				   DMA_FROM_DEVICE);
	if (ret)
		goto e_src;

	op.soc = 1;
	op.src.u.dma.address = src.dma.address;
	op.src.u.dma.offset = 0;
	op.src.u.dma.length = src.length;
	op.dst.u.dma.address = dst.dma.address;
	op.dst.u.dma.offset = 0;
	op.dst.u.dma.length = dst.length;

	op.u.ecc.function = cmd->u.ecc.function;

	ret = ccp_perform_ecc(&op);
	if (ret) {
		cmd->engine_error = cmd_q->cmd_error;
		goto e_dst;
	}

	ecc->ecc_result = le16_to_cpup(
		(const __le16 *)(dst.address + CCP_ECC_RESULT_OFFSET));
	if (!(ecc->ecc_result & CCP_ECC_RESULT_SUCCESS)) {
		ret = -EIO;
		goto e_dst;
	}

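	/* The X and Y result coordinates occupy consecutive
	 * CCP_ECC_OUTPUT_SIZE slots in the output area and are reversed
	 * back out for the caller.
	 */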
	/* Save the workarea address since it is updated as we walk through
	 * to copy the point math result
	 */
	save = dst.address;

	/* Save the ECC result X and Y coordinates */
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.x,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;
	ccp_reverse_get_dm_area(&dst, ecc->u.pm.result.y,
				CCP_ECC_MODULUS_BYTES);
	dst.address += CCP_ECC_OUTPUT_SIZE;

	/* Restore the workarea address */
	dst.address = save;

e_dst:
	ccp_dm_free(&dst);

e_src:
	ccp_dm_free(&src);

	return ret;
}

static int ccp_run_ecc_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	struct ccp_ecc_engine *ecc = &cmd->u.ecc;

	ecc->ecc_result = 0;

	if (!ecc->mod ||
	    (ecc->mod_len > CCP_ECC_MODULUS_BYTES))
		return -EINVAL;

	switch (ecc->function) {
	case CCP_ECC_FUNCTION_MMUL_384BIT:
	case CCP_ECC_FUNCTION_MADD_384BIT:
	case CCP_ECC_FUNCTION_MINV_384BIT:
		return ccp_run_ecc_mm_cmd(cmd_q, cmd);

	case CCP_ECC_FUNCTION_PADD_384BIT:
	case CCP_ECC_FUNCTION_PMUL_384BIT:
	case CCP_ECC_FUNCTION_PDBL_384BIT:
		return ccp_run_ecc_pm_cmd(cmd_q, cmd);

	default:
		return -EINVAL;
	}
}

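/* Main command entry point: reset the queue error and interrupt state,
 * sample the number of free command queue slots from the status register
 * and dispatch to the engine specific handler based on cmd->engine.
 */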
int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd)
{
	int ret;

	cmd->engine_error = 0;
	cmd_q->cmd_error = 0;
	cmd_q->int_rcvd = 0;
	cmd_q->free_slots = CMD_Q_DEPTH(ioread32(cmd_q->reg_status));

	switch (cmd->engine) {
	case CCP_ENGINE_AES:
		ret = ccp_run_aes_cmd(cmd_q, cmd);
		break;

	case CCP_ENGINE_XTS_AES_128:
		ret = ccp_run_xts_aes_cmd(cmd_q, cmd);
		break;

	case CCP_ENGINE_SHA:
		ret = ccp_run_sha_cmd(cmd_q, cmd);
		break;

	case CCP_ENGINE_RSA:
		ret = ccp_run_rsa_cmd(cmd_q, cmd);
		break;

	case CCP_ENGINE_PASSTHRU:
		ret = ccp_run_passthru_cmd(cmd_q, cmd);
		break;

	case CCP_ENGINE_ECC:
		ret = ccp_run_ecc_cmd(cmd_q, cmd);
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}