hash_core.c

/*
 * Cryptographic API.
 * Support for Nomadik hardware crypto engine.
 *
 * Copyright (C) ST-Ericsson SA 2010
 * Author: Shujuan Chen <shujuan.chen@stericsson.com> for ST-Ericsson
 * Author: Joakim Bech <joakim.xx.bech@stericsson.com> for ST-Ericsson
 * Author: Berne Hebark <berne.herbark@stericsson.com> for ST-Ericsson.
 * Author: Niklas Hernaeus <niklas.hernaeus@stericsson.com> for ST-Ericsson.
 * Author: Andreas Westin <andreas.westin@stericsson.com> for ST-Ericsson.
 * License terms: GNU General Public License (GPL) version 2
 */
#define pr_fmt(fmt) "hashX hashX: " fmt

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/klist.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/crypto.h>
#include <linux/regulator/consumer.h>
#include <linux/dmaengine.h>
#include <linux/bitops.h>

#include <crypto/internal/hash.h>
#include <crypto/sha.h>
#include <crypto/scatterwalk.h>
#include <crypto/algapi.h>

#include <linux/platform_data/crypto-ux500.h>

#include "hash_alg.h"

static int hash_mode;
module_param(hash_mode, int, 0);
MODULE_PARM_DESC(hash_mode, "CPU or DMA mode. CPU = 0 (default), DMA = 1");
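
/*
 * Example: DMA mode is selected at module load time by passing the
 * module parameter hash_mode=1; the default (hash_mode=0) uses CPU mode.
 */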
/*
 * Pre-calculated empty message digests.
 */
static const u8 zero_message_hash_sha1[SHA1_DIGEST_SIZE] = {
        0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d,
        0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90,
        0xaf, 0xd8, 0x07, 0x09
};

static const u8 zero_message_hash_sha256[SHA256_DIGEST_SIZE] = {
        0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14,
        0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24,
        0x27, 0xae, 0x41, 0xe4, 0x64, 0x9b, 0x93, 0x4c,
        0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55
};

/* HMAC-SHA1, no key */
static const u8 zero_message_hmac_sha1[SHA1_DIGEST_SIZE] = {
        0xfb, 0xdb, 0x1d, 0x1b, 0x18, 0xaa, 0x6c, 0x08,
        0x32, 0x4b, 0x7d, 0x64, 0xb7, 0x1f, 0xb7, 0x63,
        0x70, 0x69, 0x0e, 0x1d
};

/* HMAC-SHA256, no key */
static const u8 zero_message_hmac_sha256[SHA256_DIGEST_SIZE] = {
        0xb6, 0x13, 0x67, 0x9a, 0x08, 0x14, 0xd9, 0xec,
        0x77, 0x2f, 0x95, 0xd7, 0x78, 0xc3, 0x5f, 0xc5,
        0xff, 0x16, 0x97, 0xc4, 0x93, 0x71, 0x56, 0x53,
        0xc6, 0xc7, 0x12, 0x14, 0x42, 0x92, 0xc5, 0xad
};
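
/*
 * The digests above are returned directly for zero-length messages,
 * which the hardware cannot hash correctly (see the workaround in
 * hash_hw_final()).
 */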
/**
 * struct hash_driver_data - data specific to the driver.
 *
 * @device_list: A list of registered devices to choose from.
 * @device_allocation: A semaphore initialized with number of devices.
 */
struct hash_driver_data {
        struct klist device_list;
        struct semaphore device_allocation;
};

static struct hash_driver_data driver_data;
/* Declaration of functions */

/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message
 * @index_bytes: The number of bytes in the last message
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
                            const u32 *message, u8 index_bytes);
/**
 * release_hash_device - Releases a previously allocated hash device.
 * @device_data: Structure for the hash device.
 */
static void release_hash_device(struct hash_device_data *device_data)
{
        spin_lock(&device_data->ctx_lock);
        device_data->current_ctx->device = NULL;
        device_data->current_ctx = NULL;
        spin_unlock(&device_data->ctx_lock);

        /*
         * The down_interruptible part for this semaphore is called in
         * hash_get_device_data.
         */
        up(&driver_data.device_allocation);
}
static void hash_dma_setup_channel(struct hash_device_data *device_data,
                                   struct device *dev)
{
        struct hash_platform_data *platform_data = dev->platform_data;
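
        /*
         * The HASH DMA FIFO takes 16-bit wide writes in bursts of up to
         * 16, as described by this slave configuration.
         */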
        struct dma_slave_config conf = {
                .direction = DMA_MEM_TO_DEV,
                .dst_addr = device_data->phybase + HASH_DMA_FIFO,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
                .dst_maxburst = 16,
        };

        dma_cap_zero(device_data->dma.mask);
        dma_cap_set(DMA_SLAVE, device_data->dma.mask);

        device_data->dma.cfg_mem2hash = platform_data->mem_to_engine;
        device_data->dma.chan_mem2hash =
                dma_request_channel(device_data->dma.mask,
                                    platform_data->dma_filter,
                                    device_data->dma.cfg_mem2hash);
        dmaengine_slave_config(device_data->dma.chan_mem2hash, &conf);

        init_completion(&device_data->dma.complete);
}
static void hash_dma_callback(void *data)
{
        struct hash_ctx *ctx = data;

        complete(&ctx->device->dma.complete);
}

static int hash_set_dma_transfer(struct hash_ctx *ctx, struct scatterlist *sg,
                                 int len, enum dma_data_direction direction)
{
        struct dma_async_tx_descriptor *desc = NULL;
        struct dma_chan *channel = NULL;
        dma_cookie_t cookie;

        if (direction != DMA_TO_DEVICE) {
                dev_err(ctx->device->dev, "%s: Invalid DMA direction\n",
                        __func__);
                return -EFAULT;
        }
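
        /*
         * Round the last sg entry up to a whole number of DMA words; the
         * NBLW field (programmed before the transfer) tells the hardware
         * how many bits of the final word are valid, so the padding is
         * ignored.
         */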
        sg->length = ALIGN(sg->length, HASH_DMA_ALIGN_SIZE);

        channel = ctx->device->dma.chan_mem2hash;
        ctx->device->dma.sg = sg;
        ctx->device->dma.sg_len = dma_map_sg(channel->device->dev,
                                             ctx->device->dma.sg,
                                             ctx->device->dma.nents,
                                             direction);
        if (!ctx->device->dma.sg_len) {
                dev_err(ctx->device->dev, "%s: Could not map the sg list (TO_DEVICE)\n",
                        __func__);
                return -EFAULT;
        }

        dev_dbg(ctx->device->dev, "%s: Setting up DMA for buffer (TO_DEVICE)\n",
                __func__);
        desc = dmaengine_prep_slave_sg(channel,
                                       ctx->device->dma.sg,
                                       ctx->device->dma.sg_len,
                                       DMA_MEM_TO_DEV,
                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!desc) {
                dev_err(ctx->device->dev,
                        "%s: dmaengine_prep_slave_sg() failed!\n", __func__);
                return -EFAULT;
        }

        desc->callback = hash_dma_callback;
        desc->callback_param = ctx;

        cookie = dmaengine_submit(desc);
        dma_async_issue_pending(channel);

        return 0;
}
static void hash_dma_done(struct hash_ctx *ctx)
{
        struct dma_chan *chan;

        chan = ctx->device->dma.chan_mem2hash;
        dmaengine_terminate_all(chan);
        dma_unmap_sg(chan->device->dev, ctx->device->dma.sg,
                     ctx->device->dma.sg_len, DMA_TO_DEVICE);
}

static int hash_dma_write(struct hash_ctx *ctx,
                          struct scatterlist *sg, int len)
{
        int error = hash_set_dma_transfer(ctx, sg, len, DMA_TO_DEVICE);
        if (error) {
                dev_dbg(ctx->device->dev,
                        "%s: hash_set_dma_transfer() failed\n", __func__);
                return error;
        }

        return len;
}
/**
 * get_empty_message_digest - Returns a pre-calculated digest for
 * the empty message.
 * @device_data: Structure for the hash device.
 * @zero_hash: Buffer to return the empty message digest.
 * @zero_hash_size: Hash size of the empty message digest.
 * @zero_digest: True if a pre-calculated digest was returned.
 */
static int get_empty_message_digest(
                struct hash_device_data *device_data,
                u8 *zero_hash, u32 *zero_hash_size, bool *zero_digest)
{
        int ret = 0;
        struct hash_ctx *ctx = device_data->current_ctx;
        *zero_digest = false;

        /*
         * Caller responsible for ctx != NULL.
         */
        if (HASH_OPER_MODE_HASH == ctx->config.oper_mode) {
                if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                        memcpy(zero_hash, &zero_message_hash_sha1[0],
                               SHA1_DIGEST_SIZE);
                        *zero_hash_size = SHA1_DIGEST_SIZE;
                        *zero_digest = true;
                } else if (HASH_ALGO_SHA256 ==
                           ctx->config.algorithm) {
                        memcpy(zero_hash, &zero_message_hash_sha256[0],
                               SHA256_DIGEST_SIZE);
                        *zero_hash_size = SHA256_DIGEST_SIZE;
                        *zero_digest = true;
                } else {
                        dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
                                __func__);
                        ret = -EINVAL;
                        goto out;
                }
        } else if (HASH_OPER_MODE_HMAC == ctx->config.oper_mode) {
                if (!ctx->keylen) {
                        if (HASH_ALGO_SHA1 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha1[0],
                                       SHA1_DIGEST_SIZE);
                                *zero_hash_size = SHA1_DIGEST_SIZE;
                                *zero_digest = true;
                        } else if (HASH_ALGO_SHA256 == ctx->config.algorithm) {
                                memcpy(zero_hash, &zero_message_hmac_sha256[0],
                                       SHA256_DIGEST_SIZE);
                                *zero_hash_size = SHA256_DIGEST_SIZE;
                                *zero_digest = true;
                        } else {
                                dev_err(device_data->dev, "%s: Incorrect algorithm!\n",
                                        __func__);
                                ret = -EINVAL;
                                goto out;
                        }
                } else {
                        dev_dbg(device_data->dev,
                                "%s: Continue hash calculation, since hmac key available\n",
                                __func__);
                }
        }
out:

        return ret;
}
/**
 * hash_disable_power - Request to disable power and clock.
 * @device_data: Structure for the hash device.
 * @save_device_state: If true, saves the current hw state.
 *
 * This function requests disabling of the power (regulator) and clock,
 * and can also save the current hw state.
 */
static int hash_disable_power(struct hash_device_data *device_data,
                              bool save_device_state)
{
        int ret = 0;
        struct device *dev = device_data->dev;

        spin_lock(&device_data->power_state_lock);
        if (!device_data->power_state)
                goto out;

        if (save_device_state) {
                hash_save_state(device_data,
                                &device_data->state);
                device_data->restore_dev_state = true;
        }

        clk_disable(device_data->clk);
        ret = regulator_disable(device_data->regulator);
        if (ret)
                dev_err(dev, "%s: regulator_disable() failed!\n", __func__);

        device_data->power_state = false;

out:
        spin_unlock(&device_data->power_state_lock);

        return ret;
}
/**
 * hash_enable_power - Request to enable power and clock.
 * @device_data: Structure for the hash device.
 * @restore_device_state: If true, restores a previously saved hw state.
 *
 * This function requests enabling of the power (regulator) and clock,
 * and can also restore a previously saved hw state.
 */
static int hash_enable_power(struct hash_device_data *device_data,
                             bool restore_device_state)
{
        int ret = 0;
        struct device *dev = device_data->dev;

        spin_lock(&device_data->power_state_lock);
        if (!device_data->power_state) {
                ret = regulator_enable(device_data->regulator);
                if (ret) {
                        dev_err(dev, "%s: regulator_enable() failed!\n",
                                __func__);
                        goto out;
                }
                ret = clk_enable(device_data->clk);
                if (ret) {
                        dev_err(dev, "%s: clk_enable() failed!\n", __func__);
                        ret = regulator_disable(
                                        device_data->regulator);
                        goto out;
                }
                device_data->power_state = true;
        }

        if (device_data->restore_dev_state) {
                if (restore_device_state) {
                        device_data->restore_dev_state = false;
                        hash_resume_state(device_data, &device_data->state);
                }
        }

out:
        spin_unlock(&device_data->power_state_lock);

        return ret;
}
/**
 * hash_get_device_data - Checks for an available hash device and returns it.
 * @ctx: Structure for the hash context.
 * @device_data: Structure for the hash device.
 *
 * This function checks for an available hash device and returns it to
 * the caller.
 * Note! The caller needs to release the device, by calling up().
 */
static int hash_get_device_data(struct hash_ctx *ctx,
                                struct hash_device_data **device_data)
{
        int ret;
        struct klist_iter device_iterator;
        struct klist_node *device_node;
        struct hash_device_data *local_device_data = NULL;

        /* Wait until a device is available */
        ret = down_interruptible(&driver_data.device_allocation);
        if (ret)
                return ret; /* Interrupted */

        /* Select a device */
        klist_iter_init(&driver_data.device_list, &device_iterator);
        device_node = klist_next(&device_iterator);
        while (device_node) {
                local_device_data = container_of(device_node,
                                struct hash_device_data, list_node);
                spin_lock(&local_device_data->ctx_lock);
                /* current_ctx allocates a device, NULL = unallocated */
                if (local_device_data->current_ctx) {
                        device_node = klist_next(&device_iterator);
                } else {
                        local_device_data->current_ctx = ctx;
                        ctx->device = local_device_data;
                        spin_unlock(&local_device_data->ctx_lock);
                        break;
                }
                spin_unlock(&local_device_data->ctx_lock);
        }
        klist_iter_exit(&device_iterator);

        if (!device_node) {
                /*
                 * No free device found.
                 * Since we allocated a device with down_interruptible, this
                 * should not be able to happen.
                 * Number of available devices, which are contained in
                 * device_allocation, is therefore decremented by not doing
                 * an up(device_allocation).
                 */
                return -EBUSY;
        }

        *device_data = local_device_data;

        return 0;
}
/**
 * hash_hw_write_key - Writes the key to the hardware registers.
 *
 * @device_data: Structure for the hash device.
 * @key: Key to be written.
 * @keylen: The length of the key.
 *
 * Note! This function DOES NOT write to the NBLW register, even though
 * specified in the hw design spec. Either due to incorrect info in the
 * spec or due to a bug in the hw.
 */
static void hash_hw_write_key(struct hash_device_data *device_data,
                              const u8 *key, unsigned int keylen)
{
        u32 word = 0;
        int nwords = 1;

        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        while (keylen >= 4) {
                u32 *key_word = (u32 *)key;

                HASH_SET_DIN(key_word, nwords);
                keylen -= 4;
                key += 4;
        }
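
        /*
         * Example: with keylen == 3 the loop below builds
         * word = key[0] | key[1] << 8 | key[2] << 16, i.e. the tail
         * bytes are packed in little-endian order, matching the full
         * words written above.
         */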
        /* Take care of the remaining bytes in the last word */
        if (keylen) {
                word = 0;
                while (keylen) {
                        word |= (key[keylen - 1] << (8 * (keylen - 1)));
                        keylen--;
                }

                HASH_SET_DIN(&word, nwords);
        }

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        HASH_SET_DCAL;

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();
}
/**
 * init_hash_hw - Initialise the hash hardware for a new calculation.
 * @device_data: Structure for the hash device.
 * @ctx: The hash context.
 *
 * This function will enable the bits needed to clear and start a new
 * calculation.
 */
static int init_hash_hw(struct hash_device_data *device_data,
                        struct hash_ctx *ctx)
{
        int ret = 0;

        ret = hash_setconfiguration(device_data, &ctx->config);
        if (ret) {
                dev_err(device_data->dev, "%s: hash_setconfiguration() failed!\n",
                        __func__);
                return ret;
        }

        hash_begin(device_data, ctx);

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
                hash_hw_write_key(device_data, ctx->key, ctx->keylen);

        return ret;
}
/**
 * hash_get_nents - Return number of entries (nents) in scatterlist (sg).
 *
 * @sg: Scatterlist.
 * @size: Size in bytes.
 * @aligned: True if sg data aligned to work in DMA mode.
 */
static int hash_get_nents(struct scatterlist *sg, int size, bool *aligned)
{
        int nents = 0;
        bool aligned_data = true;

        while (size > 0 && sg) {
                nents++;
                size -= sg->length;

                /* hash_set_dma_transfer will align last nent */
                if ((aligned && !IS_ALIGNED(sg->offset, HASH_DMA_ALIGN_SIZE)) ||
                    (!IS_ALIGNED(sg->length, HASH_DMA_ALIGN_SIZE) && size > 0))
                        aligned_data = false;

                sg = sg_next(sg);
        }

        if (aligned)
                *aligned = aligned_data;

        if (size != 0)
                return -EFAULT;

        return nents;
}

/**
 * hash_dma_valid_data - checks for dma valid sg data.
 * @sg: Scatterlist.
 * @datasize: Datasize in bytes.
 *
 * NOTE! This function checks for dma valid sg data, since dma
 * only accepts data sizes that are a whole number of words.
 */
static bool hash_dma_valid_data(struct scatterlist *sg, int datasize)
{
        bool aligned;

        /* Need to include at least one nent, else error */
        if (hash_get_nents(sg, datasize, &aligned) < 1)
                return false;

        return aligned;
}
/**
 * hash_init - Common hash init function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 *
 * Initialize structures.
 */
static int hash_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (!ctx->key)
                ctx->keylen = 0;

        memset(&req_ctx->state, 0, sizeof(struct hash_state));
        req_ctx->updated = 0;
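
        /*
         * DMA only pays off for requests of at least
         * HASH_DMA_PERFORMANCE_MIN_SIZE bytes whose scatterlist is
         * suitably aligned; anything smaller or misaligned falls back
         * to CPU mode below.
         */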
        if (hash_mode == HASH_MODE_DMA) {
                if (req->nbytes < HASH_DMA_ALIGN_SIZE) {
                        req_ctx->dma_mode = false; /* Don't use DMA */

                        pr_debug("%s: DMA mode, but direct to CPU mode for data size < %d\n",
                                 __func__, HASH_DMA_ALIGN_SIZE);
                } else {
                        if (req->nbytes >= HASH_DMA_PERFORMANCE_MIN_SIZE &&
                            hash_dma_valid_data(req->src, req->nbytes)) {
                                req_ctx->dma_mode = true;
                        } else {
                                req_ctx->dma_mode = false;
                                pr_debug("%s: DMA mode, but use CPU mode for datalength < %d or non-aligned data, except in last nent\n",
                                         __func__,
                                         HASH_DMA_PERFORMANCE_MIN_SIZE);
                        }
                }
        }

        return 0;
}
/**
 * hash_processblock - This function processes a single block of 512 bits (64
 * bytes), word aligned, starting at message.
 * @device_data: Structure for the hash device.
 * @message: Block (512 bits) of message to be written to
 * the HASH hardware.
 * @length: Message length in bytes; a whole number of words.
 */
static void hash_processblock(struct hash_device_data *device_data,
                              const u32 *message, int length)
{
        int len = length / HASH_BYTES_PER_WORD;
        /*
         * NBLW bits. Reset the number of bits in last word (NBLW).
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        /*
         * Write message data to the HASH_DIN register.
         */
        HASH_SET_DIN(message, len);
}
/**
 * hash_messagepad - Pads a message and writes the nblw bits.
 * @device_data: Structure for the hash device.
 * @message: Last word of a message.
 * @index_bytes: The number of bytes in the last message.
 *
 * This function manages the final part of the digest calculation, when less
 * than 512 bits (64 bytes) remain in message. This means index_bytes < 64.
 */
static void hash_messagepad(struct hash_device_data *device_data,
                            const u32 *message, u8 index_bytes)
{
        int nwords = 1;

        /*
         * Clear hash str register, only clear NBLW
         * since DCAL will be reset by hardware.
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);

        /* Main loop */
        while (index_bytes >= 4) {
                HASH_SET_DIN(message, nwords);
                index_bytes -= 4;
                message++;
        }

        if (index_bytes)
                HASH_SET_DIN(message, nwords);

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        /* num_of_bytes == 0 => NBLW <- 0 (32 bits valid in DATAIN) */
        HASH_SET_NBLW(index_bytes * 8);
        dev_dbg(device_data->dev, "%s: DIN=0x%08x NBLW=%lu\n",
                __func__, readl_relaxed(&device_data->base->din),
                readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);
        HASH_SET_DCAL;
        dev_dbg(device_data->dev, "%s: after dcal -> DIN=0x%08x NBLW=%lu\n",
                __func__, readl_relaxed(&device_data->base->din),
                readl_relaxed(&device_data->base->str) & HASH_STR_NBLW_MASK);

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();
}
/**
 * hash_incrementlength - Increments the length of the current message.
 * @ctx: Hash context
 * @incr: Length of message processed already
 *
 * Overflow cannot occur, because conditions for overflow are checked in
 * hash_hw_update.
 */
static void hash_incrementlength(struct hash_req_ctx *ctx, u32 incr)
{
        ctx->state.length.low_word += incr;

        /* Check for wrap-around */
        if (ctx->state.length.low_word < incr)
                ctx->state.length.high_word++;
}
/**
 * hash_setconfiguration - Sets the required configuration for the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @config: Pointer to a configuration structure.
 */
int hash_setconfiguration(struct hash_device_data *device_data,
                          struct hash_config *config)
{
        int ret = 0;

        if (config->algorithm != HASH_ALGO_SHA1 &&
            config->algorithm != HASH_ALGO_SHA256)
                return -EPERM;

        /*
         * DATAFORM bits. Set the DATAFORM bits to 0b11, which means the data
         * to be written to HASH_DIN is considered as 32 bits.
         */
        HASH_SET_DATA_FORMAT(config->data_format);

        /*
         * ALGO bit. Set to 0b1 for SHA-1 and 0b0 for SHA-256.
         */
        switch (config->algorithm) {
        case HASH_ALGO_SHA1:
                HASH_SET_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
                break;

        case HASH_ALGO_SHA256:
                HASH_CLEAR_BITS(&device_data->base->cr, HASH_CR_ALGO_MASK);
                break;

        default:
                dev_err(device_data->dev, "%s: Incorrect algorithm\n",
                        __func__);
                return -EPERM;
        }

        /*
         * MODE bit. This bit selects between HASH or HMAC mode for the
         * selected algorithm. 0b0 = HASH and 0b1 = HMAC.
         */
        if (HASH_OPER_MODE_HASH == config->oper_mode)
                HASH_CLEAR_BITS(&device_data->base->cr,
                                HASH_CR_MODE_MASK);
        else if (HASH_OPER_MODE_HMAC == config->oper_mode) {
                HASH_SET_BITS(&device_data->base->cr, HASH_CR_MODE_MASK);
                if (device_data->current_ctx->keylen > HASH_BLOCK_SIZE) {
                        /* Truncate key to blocksize */
                        dev_dbg(device_data->dev, "%s: LKEY set\n", __func__);
                        HASH_SET_BITS(&device_data->base->cr,
                                      HASH_CR_LKEY_MASK);
                } else {
                        dev_dbg(device_data->dev, "%s: LKEY cleared\n",
                                __func__);
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_LKEY_MASK);
                }
        } else {        /* Wrong hash mode */
                ret = -EPERM;
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
        }
        return ret;
}
/**
 * hash_begin - This routine resets some globals and initializes the hash
 * hardware.
 * @device_data: Structure for the hash device.
 * @ctx: Hash context.
 */
void hash_begin(struct hash_device_data *device_data, struct hash_ctx *ctx)
{
        /* HW and SW initializations */
        /* Note: there is no need to initialize buffer and digest members */

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        /*
         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
         * prepare and initialize the HASH accelerator to compute the message
         * digest of a new message.
         */
        HASH_INITIALIZE;

        /*
         * NBLW bits. Reset the number of bits in last word (NBLW).
         */
        HASH_CLEAR_BITS(&device_data->base->str, HASH_STR_NBLW_MASK);
}
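
/*
 * hash_process_data - Feed the message to the hardware in 512-bit blocks.
 *
 * Data is accumulated in the request's block buffer until a full block is
 * available; each full block is written to the hardware, and the device
 * state is saved and restored around the operation so the device can be
 * shared between contexts.
 */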
static int hash_process_data(struct hash_device_data *device_data,
                             struct hash_ctx *ctx, struct hash_req_ctx *req_ctx,
                             int msg_length, u8 *data_buffer, u8 *buffer,
                             u8 *index)
{
        int ret = 0;
        u32 count;

        do {
                if ((*index + msg_length) < HASH_BLOCK_SIZE) {
                        for (count = 0; count < msg_length; count++) {
                                buffer[*index + count] =
                                        *(data_buffer + count);
                        }
                        *index += msg_length;
                        msg_length = 0;
                } else {
                        if (req_ctx->updated) {
                                ret = hash_resume_state(device_data,
                                                &device_data->state);
                                memmove(req_ctx->state.buffer,
                                        device_data->state.buffer,
                                        HASH_BLOCK_SIZE);
                                if (ret) {
                                        dev_err(device_data->dev,
                                                "%s: hash_resume_state() failed!\n",
                                                __func__);
                                        goto out;
                                }
                        } else {
                                ret = init_hash_hw(device_data, ctx);
                                if (ret) {
                                        dev_err(device_data->dev,
                                                "%s: init_hash_hw() failed!\n",
                                                __func__);
                                        goto out;
                                }
                                req_ctx->updated = 1;
                        }
                        /*
                         * If 'data_buffer' is four byte aligned and
                         * local buffer does not have any data, we can
                         * write data directly from 'data_buffer' to
                         * HW peripheral, otherwise we first copy data
                         * to a local buffer
                         */
                        if ((0 == (((u32)data_buffer) % 4)) &&
                            (0 == *index))
                                hash_processblock(device_data,
                                                  (const u32 *)data_buffer,
                                                  HASH_BLOCK_SIZE);
                        else {
                                for (count = 0;
                                     count < (u32)(HASH_BLOCK_SIZE - *index);
                                     count++) {
                                        buffer[*index + count] =
                                                *(data_buffer + count);
                                }
                                hash_processblock(device_data,
                                                  (const u32 *)buffer,
                                                  HASH_BLOCK_SIZE);
                        }
                        hash_incrementlength(req_ctx, HASH_BLOCK_SIZE);
                        data_buffer += (HASH_BLOCK_SIZE - *index);

                        msg_length -= (HASH_BLOCK_SIZE - *index);
                        *index = 0;

                        ret = hash_save_state(device_data,
                                        &device_data->state);

                        memmove(device_data->state.buffer,
                                req_ctx->state.buffer,
                                HASH_BLOCK_SIZE);
                        if (ret) {
                                dev_err(device_data->dev, "%s: hash_save_state() failed!\n",
                                        __func__);
                                goto out;
                        }
                }
        } while (msg_length != 0);
out:

        return ret;
}
/**
 * hash_dma_final - The hash dma final function for SHA1/SHA256.
 * @req: The hash request for the job.
 */
static int hash_dma_final(struct ahash_request *req)
{
        int ret = 0;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct hash_device_data *device_data;
        u8 digest[SHA256_DIGEST_SIZE];
        int bytes_written = 0;

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
                if (ret) {
                        dev_err(device_data->dev, "%s: hash_resume_state() failed!\n",
                                __func__);
                        goto out;
                }
        }

        if (!req_ctx->updated) {
                ret = hash_setconfiguration(device_data, &ctx->config);
                if (ret) {
                        dev_err(device_data->dev,
                                "%s: hash_setconfiguration() failed!\n",
                                __func__);
                        goto out;
                }

                /* Enable DMA input */
                if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode) {
                        HASH_CLEAR_BITS(&device_data->base->cr,
                                        HASH_CR_DMAE_MASK);
                } else {
                        HASH_SET_BITS(&device_data->base->cr,
                                      HASH_CR_DMAE_MASK);
                        HASH_SET_BITS(&device_data->base->cr,
                                      HASH_CR_PRIVN_MASK);
                }

                HASH_INITIALIZE;

                if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC)
                        hash_hw_write_key(device_data, ctx->key, ctx->keylen);

                /* Number of bits in last word = (nbytes * 8) % 32 */
                HASH_SET_NBLW((req->nbytes * 8) % 32);
                req_ctx->updated = 1;
        }

        /* Store the nents in the dma struct. */
        ctx->device->dma.nents = hash_get_nents(req->src, req->nbytes, NULL);
        if (ctx->device->dma.nents < 1) {
                dev_err(device_data->dev, "%s: ctx->device->dma.nents < 1\n",
                        __func__);
                ret = -EFAULT;
                goto out;
        }

        bytes_written = hash_dma_write(ctx, req->src, req->nbytes);
        if (bytes_written != req->nbytes) {
                dev_err(device_data->dev, "%s: hash_dma_write() failed!\n",
                        __func__);
                ret = bytes_written;
                goto out;
        }

        wait_for_completion(&ctx->device->dma.complete);
        hash_dma_done(ctx);

        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;

                dev_dbg(device_data->dev, "%s: keylen: %d\n",
                        __func__, ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }

        hash_get_digest(device_data, digest, ctx->config.algorithm);
        memcpy(req->result, digest, ctx->digestsize);

out:
        release_hash_device(device_data);

        /*
         * Allocated in setkey, and only used in HMAC.
         */
        kfree(ctx->key);

        return ret;
}
/**
 * hash_hw_final - The final hash calculation function.
 * @req: The hash request for the job.
 */
static int hash_hw_final(struct ahash_request *req)
{
        int ret = 0;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct hash_device_data *device_data;
        u8 digest[SHA256_DIGEST_SIZE];

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        dev_dbg(device_data->dev, "%s: (ctx=0x%x)!\n", __func__, (u32) ctx);

        if (req_ctx->updated) {
                ret = hash_resume_state(device_data, &device_data->state);
                if (ret) {
                        dev_err(device_data->dev,
                                "%s: hash_resume_state() failed!\n", __func__);
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen == 0) {
                u8 zero_hash[SHA256_DIGEST_SIZE];
                u32 zero_hash_size = 0;
                bool zero_digest = false;

                /*
                 * Use a pre-calculated empty message digest
                 * (workaround since hw returns zeroes, hw bug!?)
                 */
                ret = get_empty_message_digest(device_data, &zero_hash[0],
                                &zero_hash_size, &zero_digest);
                if (!ret && likely(zero_hash_size == ctx->digestsize) &&
                    zero_digest) {
                        memcpy(req->result, &zero_hash[0], ctx->digestsize);
                        goto out;
                } else if (!ret && !zero_digest) {
                        dev_dbg(device_data->dev,
                                "%s: HMAC zero msg with key, continue...\n",
                                __func__);
                } else {
                        dev_err(device_data->dev,
                                "%s: ret=%d, or wrong digest size? %s\n",
                                __func__, ret,
                                zero_hash_size == ctx->digestsize ?
                                "true" : "false");
                        /* Return error */
                        goto out;
                }
        } else if (req->nbytes == 0 && ctx->keylen > 0) {
                ret = -EPERM;
                dev_err(device_data->dev, "%s: Empty message with keylength > 0, NOT supported\n",
                        __func__);
                goto out;
        }

        if (!req_ctx->updated) {
                ret = init_hash_hw(device_data, ctx);
                if (ret) {
                        dev_err(device_data->dev,
                                "%s: init_hash_hw() failed!\n", __func__);
                        goto out;
                }
        }

        if (req_ctx->state.index) {
                hash_messagepad(device_data, req_ctx->state.buffer,
                                req_ctx->state.index);
        } else {
                HASH_SET_DCAL;
                while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                        cpu_relax();
        }

        if (ctx->config.oper_mode == HASH_OPER_MODE_HMAC && ctx->key) {
                unsigned int keylen = ctx->keylen;
                u8 *key = ctx->key;

                dev_dbg(device_data->dev, "%s: keylen: %d\n",
                        __func__, ctx->keylen);
                hash_hw_write_key(device_data, key, keylen);
        }

        hash_get_digest(device_data, digest, ctx->config.algorithm);
        memcpy(req->result, digest, ctx->digestsize);

out:
        release_hash_device(device_data);

        /*
         * Allocated in setkey, and only used in HMAC.
         */
        kfree(ctx->key);

        return ret;
}
/**
 * hash_hw_update - Updates current HASH computation hashing another part of
 * the message.
 * @req: The hash request containing the message to be hashed (caller
 * allocated).
 */
int hash_hw_update(struct ahash_request *req)
{
        int ret = 0;
        u8 index = 0;
        u8 *buffer;
        struct hash_device_data *device_data;
        u8 *data_buffer;
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);
        struct crypto_hash_walk walk;
        int msg_length = crypto_hash_walk_first(req, &walk);

        /* Empty message ("") is correct indata */
        if (msg_length == 0)
                return ret;

        index = req_ctx->state.index;
        buffer = (u8 *)req_ctx->state.buffer;

        /* Check if ctx->state.length + msg_length overflows */
        if (msg_length > (req_ctx->state.length.low_word + msg_length) &&
            HASH_HIGH_WORD_MAX_VAL == req_ctx->state.length.high_word) {
                pr_err("%s: HASH_MSG_LENGTH_OVERFLOW!\n", __func__);
                return -EPERM;
        }

        ret = hash_get_device_data(ctx, &device_data);
        if (ret)
                return ret;

        /* Main loop */
        while (0 != msg_length) {
                data_buffer = walk.data;
                ret = hash_process_data(device_data, ctx, req_ctx, msg_length,
                                        data_buffer, buffer, &index);
                if (ret) {
                        dev_err(device_data->dev, "%s: hash_process_data() failed!\n",
                                __func__);
                        goto out;
                }

                msg_length = crypto_hash_walk_done(&walk, 0);
        }

        req_ctx->state.index = index;
        dev_dbg(device_data->dev, "%s: indata length=%d, bin=%d\n",
                __func__, req_ctx->state.index, req_ctx->state.bit_index);

out:
        release_hash_device(device_data);

        return ret;
}
/**
 * hash_resume_state - Function that resumes the state of a calculation.
 * @device_data: Pointer to the device structure.
 * @device_state: The state to be restored in the hash hardware.
 */
int hash_resume_state(struct hash_device_data *device_data,
                      const struct hash_state *device_state)
{
        u32 temp_cr;
        s32 count;
        int hash_mode = HASH_OPER_MODE_HASH;

        if (NULL == device_state) {
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
                return -EPERM;
        }

        /* Check correctness of index and length members */
        if (device_state->index > HASH_BLOCK_SIZE ||
            (device_state->length.low_word % HASH_BLOCK_SIZE) != 0) {
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
                return -EPERM;
        }

        /*
         * INIT bit. Set this bit to 0b1 to reset the HASH processor core and
         * prepare and initialize the HASH accelerator to compute the message
         * digest of a new message.
         */
        HASH_INITIALIZE;

        temp_cr = device_state->temp_cr;
        writel_relaxed(temp_cr & HASH_CR_RESUME_MASK, &device_data->base->cr);

        if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
                hash_mode = HASH_OPER_MODE_HMAC;
        else
                hash_mode = HASH_OPER_MODE_HASH;
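
        /*
         * Only the first 36 context registers carry state in pure HASH
         * mode; the remaining CSRs are used by HMAC mode only, so they
         * are skipped when not needed.
         */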
        for (count = 0; count < HASH_CSR_COUNT; count++) {
                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
                        break;

                writel_relaxed(device_state->csr[count],
                               &device_data->base->csrx[count]);
        }

        writel_relaxed(device_state->csfull, &device_data->base->csfull);
        writel_relaxed(device_state->csdatain, &device_data->base->csdatain);

        writel_relaxed(device_state->str_reg, &device_data->base->str);
        writel_relaxed(temp_cr, &device_data->base->cr);

        return 0;
}
/**
 * hash_save_state - Function that saves the state of hardware.
 * @device_data: Pointer to the device structure.
 * @device_state: The structure where the hardware state should be saved.
 */
int hash_save_state(struct hash_device_data *device_data,
                    struct hash_state *device_state)
{
        u32 temp_cr;
        u32 count;
        int hash_mode = HASH_OPER_MODE_HASH;

        if (NULL == device_state) {
                dev_err(device_data->dev, "%s: HASH_INVALID_PARAMETER!\n",
                        __func__);
                return -ENOTSUPP;
        }

        /*
         * Write dummy value to force digest intermediate calculation. This
         * actually makes sure that there isn't any ongoing calculation in the
         * hardware.
         */
        while (readl(&device_data->base->str) & HASH_STR_DCAL_MASK)
                cpu_relax();

        temp_cr = readl_relaxed(&device_data->base->cr);

        device_state->str_reg = readl_relaxed(&device_data->base->str);

        device_state->din_reg = readl_relaxed(&device_data->base->din);

        if (readl(&device_data->base->cr) & HASH_CR_MODE_MASK)
                hash_mode = HASH_OPER_MODE_HMAC;
        else
                hash_mode = HASH_OPER_MODE_HASH;

        for (count = 0; count < HASH_CSR_COUNT; count++) {
                if ((count >= 36) && (hash_mode == HASH_OPER_MODE_HASH))
                        break;

                device_state->csr[count] =
                        readl_relaxed(&device_data->base->csrx[count]);
        }

        device_state->csfull = readl_relaxed(&device_data->base->csfull);
        device_state->csdatain = readl_relaxed(&device_data->base->csdatain);

        device_state->temp_cr = temp_cr;

        return 0;
}
/**
 * hash_check_hw - This routine checks for peripheral Ids and PCell Ids.
 * @device_data: Structure for the hash device.
 */
int hash_check_hw(struct hash_device_data *device_data)
{
        /* Checking Peripheral Ids */
        if (HASH_P_ID0 == readl_relaxed(&device_data->base->periphid0) &&
            HASH_P_ID1 == readl_relaxed(&device_data->base->periphid1) &&
            HASH_P_ID2 == readl_relaxed(&device_data->base->periphid2) &&
            HASH_P_ID3 == readl_relaxed(&device_data->base->periphid3) &&
            HASH_CELL_ID0 == readl_relaxed(&device_data->base->cellid0) &&
            HASH_CELL_ID1 == readl_relaxed(&device_data->base->cellid1) &&
            HASH_CELL_ID2 == readl_relaxed(&device_data->base->cellid2) &&
            HASH_CELL_ID3 == readl_relaxed(&device_data->base->cellid3)) {
                return 0;
        }

        dev_err(device_data->dev, "%s: HASH_UNSUPPORTED_HW!\n", __func__);
        return -ENOTSUPP;
}
/**
 * hash_get_digest - Gets the digest.
 * @device_data: Pointer to the device structure.
 * @digest: User allocated byte array for the calculated digest.
 * @algorithm: The algorithm in use.
 */
void hash_get_digest(struct hash_device_data *device_data,
                     u8 *digest, int algorithm)
{
        u32 temp_hx_val, count;
        int loop_ctr;

        if (algorithm != HASH_ALGO_SHA1 && algorithm != HASH_ALGO_SHA256) {
                dev_err(device_data->dev, "%s: Incorrect algorithm %d\n",
                        __func__, algorithm);
                return;
        }

        if (algorithm == HASH_ALGO_SHA1)
                loop_ctr = SHA1_DIGEST_SIZE / sizeof(u32);
        else
                loop_ctr = SHA256_DIGEST_SIZE / sizeof(u32);

        dev_dbg(device_data->dev, "%s: digest array:(0x%x)\n",
                __func__, (u32) digest);
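
        /*
         * The HX result registers hold native 32-bit words, while the
         * digest is defined big-endian, so each word is split out MSB
         * first below.
         */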
        /* Copy result into digest array */
        for (count = 0; count < loop_ctr; count++) {
                temp_hx_val = readl_relaxed(&device_data->base->hx[count]);
                digest[count * 4] = (u8) ((temp_hx_val >> 24) & 0xFF);
                digest[count * 4 + 1] = (u8) ((temp_hx_val >> 16) & 0xFF);
                digest[count * 4 + 2] = (u8) ((temp_hx_val >> 8) & 0xFF);
                digest[count * 4 + 3] = (u8) ((temp_hx_val >> 0) & 0xFF);
        }
}
/**
 * ahash_update - The hash update function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_update(struct ahash_request *req)
{
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        if (hash_mode != HASH_MODE_DMA || !req_ctx->dma_mode)
                ret = hash_hw_update(req);
        /* Skip update for DMA, all data will be passed to DMA in final */

        if (ret)
                pr_err("%s: hash_hw_update() failed!\n", __func__);

        return ret;
}

/**
 * ahash_final - The hash final function for SHA1/SHA2 (SHA256).
 * @req: The hash request for the job.
 */
static int ahash_final(struct ahash_request *req)
{
        int ret = 0;
        struct hash_req_ctx *req_ctx = ahash_request_ctx(req);

        pr_debug("%s: data size: %d\n", __func__, req->nbytes);

        if ((hash_mode == HASH_MODE_DMA) && req_ctx->dma_mode)
                ret = hash_dma_final(req);
        else
                ret = hash_hw_final(req);

        if (ret)
                pr_err("%s: hash_hw/dma_final() failed\n", __func__);

        return ret;
}
static int hash_setkey(struct crypto_ahash *tfm,
                       const u8 *key, unsigned int keylen, int alg)
{
        int ret = 0;
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        /*
         * Freed in final.
         */
        ctx->key = kmemdup(key, keylen, GFP_KERNEL);
        if (!ctx->key) {
                pr_err("%s: Failed to allocate ctx->key for %d\n",
                       __func__, alg);
                return -ENOMEM;
        }
        ctx->keylen = keylen;

        return ret;
}
static int ahash_sha1_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA1;
        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
        ctx->digestsize = SHA1_DIGEST_SIZE;

        return hash_init(req);
}

static int ahash_sha256_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA256;
        ctx->config.oper_mode = HASH_OPER_MODE_HASH;
        ctx->digestsize = SHA256_DIGEST_SIZE;

        return hash_init(req);
}

static int ahash_sha1_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = ahash_sha1_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int ahash_sha256_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = ahash_sha256_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha1_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA1;
        ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
        ctx->digestsize = SHA1_DIGEST_SIZE;

        return hash_init(req);
}

static int hmac_sha256_init(struct ahash_request *req)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct hash_ctx *ctx = crypto_ahash_ctx(tfm);

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = HASH_ALGO_SHA256;
        ctx->config.oper_mode = HASH_OPER_MODE_HMAC;
        ctx->digestsize = SHA256_DIGEST_SIZE;

        return hash_init(req);
}

static int hmac_sha1_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = hmac_sha1_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha256_digest(struct ahash_request *req)
{
        int ret2, ret1;

        ret1 = hmac_sha256_init(req);
        if (ret1)
                goto out;

        ret1 = ahash_update(req);
        ret2 = ahash_final(req);

out:
        return ret1 ? ret1 : ret2;
}

static int hmac_sha1_setkey(struct crypto_ahash *tfm,
                            const u8 *key, unsigned int keylen)
{
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA1);
}

static int hmac_sha256_setkey(struct crypto_ahash *tfm,
                              const u8 *key, unsigned int keylen)
{
        return hash_setkey(tfm, key, keylen, HASH_ALGO_SHA256);
}
struct hash_algo_template {
        struct hash_config conf;
        struct ahash_alg hash;
};

static int hash_cra_init(struct crypto_tfm *tfm)
{
        struct hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_alg *alg = tfm->__crt_alg;
        struct hash_algo_template *hash_alg;

        hash_alg = container_of(__crypto_ahash_alg(alg),
                                struct hash_algo_template,
                                hash);

        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct hash_req_ctx));

        ctx->config.data_format = HASH_DATA_8_BITS;
        ctx->config.algorithm = hash_alg->conf.algorithm;
        ctx->config.oper_mode = hash_alg->conf.oper_mode;

        ctx->digestsize = hash_alg->hash.halg.digestsize;

        return 0;
}
static struct hash_algo_template hash_algs[] = {
        {
                .conf.algorithm = HASH_ALGO_SHA1,
                .conf.oper_mode = HASH_OPER_MODE_HASH,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = ahash_sha1_digest,
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "sha1",
                                .cra_driver_name = "sha1-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .conf.algorithm = HASH_ALGO_SHA256,
                .conf.oper_mode = HASH_OPER_MODE_HASH,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = ahash_sha256_digest,
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "sha256",
                                .cra_driver_name = "sha256-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .conf.algorithm = HASH_ALGO_SHA1,
                .conf.oper_mode = HASH_OPER_MODE_HMAC,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = hmac_sha1_digest,
                        .setkey = hmac_sha1_setkey,
                        .halg.digestsize = SHA1_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "hmac(sha1)",
                                .cra_driver_name = "hmac-sha1-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA1_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        },
        {
                .conf.algorithm = HASH_ALGO_SHA256,
                .conf.oper_mode = HASH_OPER_MODE_HMAC,
                .hash = {
                        .init = hash_init,
                        .update = ahash_update,
                        .final = ahash_final,
                        .digest = hmac_sha256_digest,
                        .setkey = hmac_sha256_setkey,
                        .halg.digestsize = SHA256_DIGEST_SIZE,
                        .halg.statesize = sizeof(struct hash_ctx),
                        .halg.base = {
                                .cra_name = "hmac(sha256)",
                                .cra_driver_name = "hmac-sha256-ux500",
                                .cra_flags = (CRYPTO_ALG_TYPE_AHASH |
                                              CRYPTO_ALG_ASYNC),
                                .cra_blocksize = SHA256_BLOCK_SIZE,
                                .cra_ctxsize = sizeof(struct hash_ctx),
                                .cra_type = &crypto_ahash_type,
                                .cra_init = hash_cra_init,
                                .cra_module = THIS_MODULE,
                        }
                }
        }
};
/**
 * ahash_algs_register_all - Registers all supported hash algorithms.
 * @device_data: Structure for the hash device.
 */
static int ahash_algs_register_all(struct hash_device_data *device_data)
{
        int ret;
        int i;
        int count;

        for (i = 0; i < ARRAY_SIZE(hash_algs); i++) {
                ret = crypto_register_ahash(&hash_algs[i].hash);
                if (ret) {
                        count = i;
                        dev_err(device_data->dev, "%s: alg registration failed\n",
                                hash_algs[i].hash.halg.base.cra_driver_name);
                        goto unreg;
                }
        }
        return 0;
unreg:
        for (i = 0; i < count; i++)
                crypto_unregister_ahash(&hash_algs[i].hash);
        return ret;
}
/**
 * ahash_algs_unregister_all - Unregister all registered ahash algorithms.
 * @device_data: Structure for the hash device.
 */
static void ahash_algs_unregister_all(struct hash_device_data *device_data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(hash_algs); i++)
		crypto_unregister_ahash(&hash_algs[i].hash);
}
/**
 * ux500_hash_probe - Function that probes the hash hardware.
 * @pdev: The platform device.
 */
static int ux500_hash_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res = NULL;
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	/* Probe runs in process context, so a sleeping allocation is fine. */
	device_data = devm_kzalloc(dev, sizeof(*device_data), GFP_KERNEL);
	if (!device_data) {
		ret = -ENOMEM;
		goto out;
	}

	device_data->dev = dev;
	device_data->current_ctx = NULL;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_dbg(dev, "%s: platform_get_resource() failed!\n", __func__);
		ret = -ENODEV;
		goto out;
	}

	device_data->phybase = res->start;
	device_data->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(device_data->base)) {
		dev_err(dev, "%s: ioremap() failed!\n", __func__);
		ret = PTR_ERR(device_data->base);
		goto out;
	}
	spin_lock_init(&device_data->ctx_lock);
	spin_lock_init(&device_data->power_state_lock);

	/* Enable power for HASH1 hardware block */
	device_data->regulator = regulator_get(dev, "v-ape");
	if (IS_ERR(device_data->regulator)) {
		dev_err(dev, "%s: regulator_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->regulator);
		device_data->regulator = NULL;
		goto out;
	}

	/* Enable the clock for HASH1 hardware block */
	device_data->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(device_data->clk)) {
		dev_err(dev, "%s: clk_get() failed!\n", __func__);
		ret = PTR_ERR(device_data->clk);
		goto out_regulator;
	}

	ret = clk_prepare(device_data->clk);
	if (ret) {
		dev_err(dev, "%s: clk_prepare() failed!\n", __func__);
		goto out_regulator;
	}

	/* Enable device power (and clock) */
	ret = hash_enable_power(device_data, false);
	if (ret) {
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);
		goto out_clk_unprepare;
	}

	ret = hash_check_hw(device_data);
	if (ret) {
		dev_err(dev, "%s: hash_check_hw() failed!\n", __func__);
		goto out_power;
	}

	if (hash_mode == HASH_MODE_DMA)
		hash_dma_setup_channel(device_data, dev);

	platform_set_drvdata(pdev, device_data);

	/* Put the new device into the device list... */
	klist_add_tail(&device_data->list_node, &driver_data.device_list);
	/* ... and signal that a new device is available. */
	up(&driver_data.device_allocation);

	ret = ahash_algs_register_all(device_data);
	if (ret) {
		dev_err(dev, "%s: ahash_algs_register_all() failed!\n",
			__func__);
		goto out_power;
	}

	dev_info(dev, "successfully registered\n");

	return 0;

out_power:
	hash_disable_power(device_data, false);

out_clk_unprepare:
	clk_unprepare(device_data->clk);

out_regulator:
	regulator_put(device_data->regulator);

out:
	return ret;
}
/**
 * ux500_hash_remove - Function that removes the hash device from the platform.
 * @pdev: The platform device.
 */
static int ux500_hash_remove(struct platform_device *pdev)
{
	struct hash_device_data *device_data;
	struct device *dev = &pdev->dev;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(dev, "%s: platform_get_drvdata() failed!\n", __func__);
		return -ENODEV;
	}

	/* Try to decrease the number of available devices. */
	if (down_trylock(&driver_data.device_allocation))
		return -EBUSY;

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (device_data->current_ctx) {
		/* The device is busy */
		spin_unlock(&device_data->ctx_lock);
		/* Return the device to the pool. */
		up(&driver_data.device_allocation);
		return -EBUSY;
	}

	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(dev, "%s: hash_disable_power() failed\n", __func__);

	clk_unprepare(device_data->clk);
	regulator_put(device_data->regulator);

	return 0;
}
/**
 * ux500_hash_shutdown - Function that shuts down the hash device.
 * @pdev: The platform device.
 */
static void ux500_hash_shutdown(struct platform_device *pdev)
{
	struct hash_device_data *device_data;

	device_data = platform_get_drvdata(pdev);
	if (!device_data) {
		dev_err(&pdev->dev, "%s: platform_get_drvdata() failed!\n",
			__func__);
		return;
	}

	/* Check that the device is free */
	spin_lock(&device_data->ctx_lock);
	/* current_ctx allocates a device, NULL = unallocated */
	if (!device_data->current_ctx) {
		if (down_trylock(&driver_data.device_allocation))
			dev_dbg(&pdev->dev, "%s: Hash still in use! Shutting down anyway...\n",
				__func__);
		/*
		 * (Allocate the device)
		 * Need to set this to a non-NULL (dummy) value,
		 * to avoid usage during context switching.
		 */
		device_data->current_ctx++;
	}
	spin_unlock(&device_data->ctx_lock);

	/* Remove the device from the list */
	if (klist_node_attached(&device_data->list_node))
		klist_remove(&device_data->list_node);

	/* If this was the last device, remove the services */
	if (list_empty(&driver_data.device_list.k_list))
		ahash_algs_unregister_all(device_data);

	if (hash_disable_power(device_data, false))
		dev_err(&pdev->dev, "%s: hash_disable_power() failed\n",
			__func__);
}
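
/*
 * Note on the dummy-context trick above and in the PM callbacks below: a
 * minimal sketch of the sentinel it relies on (illustrative only). Since
 * current_ctx is NULL while the device is free, incrementing the pointer
 * turns it into the smallest non-NULL value, which suspend/resume can
 * recognize and distinguish from a real, in-flight context:
 *
 *	struct hash_ctx *sentinel = NULL;
 *	sentinel++;	// == (struct hash_ctx *)NULL + 1
 *	// "claimed by shutdown/PM, no real context attached"
 *	if (device_data->current_ctx == sentinel)
 *		...
 */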
#ifdef CONFIG_PM_SLEEP
/**
 * ux500_hash_suspend - Function that suspends the hash device.
 * @dev: Device to suspend.
 */
static int ux500_hash_suspend(struct device *dev)
{
	int ret;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENODEV;
	}

	spin_lock(&device_data->ctx_lock);
	if (!device_data->current_ctx)
		device_data->current_ctx++;
	spin_unlock(&device_data->ctx_lock);

	if (device_data->current_ctx == ++temp_ctx) {
		if (down_interruptible(&driver_data.device_allocation))
			dev_dbg(dev, "%s: down_interruptible() failed\n",
				__func__);
		ret = hash_disable_power(device_data, false);
	} else {
		ret = hash_disable_power(device_data, true);
	}

	if (ret)
		dev_err(dev, "%s: hash_disable_power() failed\n", __func__);

	return ret;
}
/**
 * ux500_hash_resume - Function that resumes the hash device.
 * @dev: Device to resume.
 */
static int ux500_hash_resume(struct device *dev)
{
	int ret = 0;
	struct hash_device_data *device_data;
	struct hash_ctx *temp_ctx = NULL;

	device_data = dev_get_drvdata(dev);
	if (!device_data) {
		dev_err(dev, "%s: dev_get_drvdata() failed!\n", __func__);
		return -ENODEV;
	}

	spin_lock(&device_data->ctx_lock);
	if (device_data->current_ctx == ++temp_ctx)
		device_data->current_ctx = NULL;
	spin_unlock(&device_data->ctx_lock);

	if (!device_data->current_ctx)
		up(&driver_data.device_allocation);
	else
		ret = hash_enable_power(device_data, true);

	if (ret)
		dev_err(dev, "%s: hash_enable_power() failed!\n", __func__);

	return ret;
}
#endif
static SIMPLE_DEV_PM_OPS(ux500_hash_pm, ux500_hash_suspend, ux500_hash_resume);

static const struct of_device_id ux500_hash_match[] = {
	{ .compatible = "stericsson,ux500-hash" },
	{ },
};
MODULE_DEVICE_TABLE(of, ux500_hash_match);
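
/*
 * Illustrative device-tree node matched by the table above (a sketch only;
 * the unit address, register size, and any clock/power specifiers depend on
 * the SoC dtsi and are assumptions here, not taken from this file):
 *
 *	hash@a03c2000 {
 *		compatible = "stericsson,ux500-hash";
 *		reg = <0xa03c2000 0x1000>;
 *	};
 */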
static struct platform_driver hash_driver = {
	.probe = ux500_hash_probe,
	.remove = ux500_hash_remove,
	.shutdown = ux500_hash_shutdown,
	.driver = {
		.name = "hash1",
		.of_match_table = ux500_hash_match,
		.pm = &ux500_hash_pm,
	}
};
/**
 * ux500_hash_mod_init - The kernel module init function.
 */
static int __init ux500_hash_mod_init(void)
{
	klist_init(&driver_data.device_list, NULL, NULL);
	/* Initialize the semaphore to 0 devices (locked state) */
	sema_init(&driver_data.device_allocation, 0);

	return platform_driver_register(&hash_driver);
}
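
/*
 * A minimal sketch of the device-pool pattern set up above (illustrative,
 * using the driver_data fields declared earlier in this file): the semaphore
 * starts at 0, each successful probe does up() to publish a device, and a
 * consumer claims and later releases one with down()/up():
 *
 *	down(&driver_data.device_allocation);	// wait for a free device
 *	// ...pick a device from driver_data.device_list and use it...
 *	up(&driver_data.device_allocation);	// return it to the pool
 */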
/**
 * ux500_hash_mod_fini - The kernel module exit function.
 */
static void __exit ux500_hash_mod_fini(void)
{
	platform_driver_unregister(&hash_driver);
}

module_init(ux500_hash_mod_init);
module_exit(ux500_hash_mod_fini);

MODULE_DESCRIPTION("Driver for ST-Ericsson UX500 HASH engine.");
MODULE_LICENSE("GPL");

MODULE_ALIAS_CRYPTO("sha1-all");
MODULE_ALIAS_CRYPTO("sha256-all");
MODULE_ALIAS_CRYPTO("hmac-sha1-all");
MODULE_ALIAS_CRYPTO("hmac-sha256-all");