- /*
- * Software async crypto daemon.
- *
- * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
- *
- * Added AEAD support to cryptd.
- * Authors: Tadeusz Struk (tadeusz.struk@intel.com)
- * Adrian Hoban <adrian.hoban@intel.com>
- * Gabriele Paoloni <gabriele.paoloni@intel.com>
- * Aidan O'Mahony (aidan.o.mahony@intel.com)
- * Copyright (c) 2010, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the Free
- * Software Foundation; either version 2 of the License, or (at your option)
- * any later version.
- *
- */
- #include <crypto/algapi.h>
- #include <crypto/internal/hash.h>
- #include <crypto/internal/aead.h>
- #include <crypto/cryptd.h>
- #include <crypto/crypto_wq.h>
- #include <linux/err.h>
- #include <linux/init.h>
- #include <linux/kernel.h>
- #include <linux/list.h>
- #include <linux/module.h>
- #include <linux/scatterlist.h>
- #include <linux/sched.h>
- #include <linux/slab.h>
- #define CRYPTD_MAX_CPU_QLEN 100
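- /*
- * Per-CPU queueing: every possible CPU gets its own crypto_queue and
- * work_struct.  Requests are queued on the CPU that submitted them and
- * processed from workqueue context on that same CPU.
- */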
- struct cryptd_cpu_queue {
- struct crypto_queue queue;
- struct work_struct work;
- };
- struct cryptd_queue {
- struct cryptd_cpu_queue __percpu *cpu_queue;
- };
- struct cryptd_instance_ctx {
- struct crypto_spawn spawn;
- struct cryptd_queue *queue;
- };
- struct hashd_instance_ctx {
- struct crypto_shash_spawn spawn;
- struct cryptd_queue *queue;
- };
- struct aead_instance_ctx {
- struct crypto_aead_spawn aead_spawn;
- struct cryptd_queue *queue;
- };
- struct cryptd_blkcipher_ctx {
- struct crypto_blkcipher *child;
- };
- struct cryptd_blkcipher_request_ctx {
- crypto_completion_t complete;
- };
- struct cryptd_hash_ctx {
- struct crypto_shash *child;
- };
- struct cryptd_hash_request_ctx {
- crypto_completion_t complete;
- struct shash_desc desc;
- };
- struct cryptd_aead_ctx {
- struct crypto_aead *child;
- };
- struct cryptd_aead_request_ctx {
- crypto_completion_t complete;
- };
- static void cryptd_queue_worker(struct work_struct *work);
- static int cryptd_init_queue(struct cryptd_queue *queue,
- unsigned int max_cpu_qlen)
- {
- int cpu;
- struct cryptd_cpu_queue *cpu_queue;
- queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
- if (!queue->cpu_queue)
- return -ENOMEM;
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
- INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
- }
- return 0;
- }
- static void cryptd_fini_queue(struct cryptd_queue *queue)
- {
- int cpu;
- struct cryptd_cpu_queue *cpu_queue;
- for_each_possible_cpu(cpu) {
- cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
- BUG_ON(cpu_queue->queue.qlen);
- }
- free_percpu(queue->cpu_queue);
- }
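- /* Queue a request on the local CPU's queue and kick that CPU's work item. */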
- static int cryptd_enqueue_request(struct cryptd_queue *queue,
- struct crypto_async_request *request)
- {
- int cpu, err;
- struct cryptd_cpu_queue *cpu_queue;
- cpu = get_cpu();
- cpu_queue = this_cpu_ptr(queue->cpu_queue);
- err = crypto_enqueue_request(&cpu_queue->queue, request);
- queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);
- put_cpu();
- return err;
- }
- /* Called in workqueue context: do one real crypto operation (via
- * req->complete) and reschedule itself if there is more work to do. */
- static void cryptd_queue_worker(struct work_struct *work)
- {
- struct cryptd_cpu_queue *cpu_queue;
- struct crypto_async_request *req, *backlog;
- cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
- /*
- * Only handle one request at a time to avoid hogging the crypto workqueue.
- * preempt_disable/enable is used to prevent being preempted by
- * cryptd_enqueue_request().  local_bh_disable/enable is used to prevent
- * cryptd_enqueue_request() from being called from a softirq while the
- * queue is being manipulated.
- */
- local_bh_disable();
- preempt_disable();
- backlog = crypto_get_backlog(&cpu_queue->queue);
- req = crypto_dequeue_request(&cpu_queue->queue);
- preempt_enable();
- local_bh_enable();
- if (!req)
- return;
- if (backlog)
- backlog->complete(backlog, -EINPROGRESS);
- req->complete(req, 0);
- if (cpu_queue->queue.qlen)
- queue_work(kcrypto_wq, &cpu_queue->work);
- }
- static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
- {
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- return ictx->queue;
- }
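- /*
- * Propagate the CRYPTO_ALG_INTERNAL bit from the template parameters so
- * that a cryptd instance wrapping an internal-only algorithm is itself
- * marked internal.
- */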
- static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
- u32 *mask)
- {
- struct crypto_attr_type *algt;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return;
- *type |= algt->type & CRYPTO_ALG_INTERNAL;
- *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
- }
- static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
- const u8 *key, unsigned int keylen)
- {
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
- struct crypto_blkcipher *child = ctx->child;
- int err;
- crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_blkcipher_setkey(child, key, keylen);
- crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
- }
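- /*
- * Run the actual synchronous blkcipher operation in workqueue context and
- * invoke the saved completion callback with softirqs disabled.
- */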
- static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
- struct crypto_blkcipher *child,
- int err,
- int (*crypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int len))
- {
- struct cryptd_blkcipher_request_ctx *rctx;
- struct blkcipher_desc desc;
- rctx = ablkcipher_request_ctx(req);
- if (unlikely(err == -EINPROGRESS))
- goto out;
- desc.tfm = child;
- desc.info = req->info;
- desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypt(&desc, req->dst, req->src, req->nbytes);
- req->base.complete = rctx->complete;
- out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
- static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
- {
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
- struct crypto_blkcipher *child = ctx->child;
- cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
- crypto_blkcipher_crt(child)->encrypt);
- }
- static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
- {
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
- struct crypto_blkcipher *child = ctx->child;
- cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
- crypto_blkcipher_crt(child)->decrypt);
- }
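- /*
- * The *_enqueue helpers stash the caller's completion callback in the
- * request context, substitute cryptd's own completion and defer the
- * request to the per-CPU queue; the hash and AEAD paths below follow the
- * same pattern.
- */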
- static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
- crypto_completion_t compl)
- {
- struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
- struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
- struct cryptd_queue *queue;
- queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
- rctx->complete = req->base.complete;
- req->base.complete = compl;
- return cryptd_enqueue_request(queue, &req->base);
- }
- static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
- {
- return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
- }
- static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
- {
- return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
- }
- static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
- {
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_spawn *spawn = &ictx->spawn;
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_blkcipher *cipher;
- cipher = crypto_spawn_blkcipher(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
- ctx->child = cipher;
- tfm->crt_ablkcipher.reqsize =
- sizeof(struct cryptd_blkcipher_request_ctx);
- return 0;
- }
- static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
- {
- struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_blkcipher(ctx->child);
- }
- static int cryptd_init_instance(struct crypto_instance *inst,
- struct crypto_alg *alg)
- {
- if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)",
- alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
- return -ENAMETOOLONG;
- memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
- inst->alg.cra_priority = alg->cra_priority + 50;
- inst->alg.cra_blocksize = alg->cra_blocksize;
- inst->alg.cra_alignmask = alg->cra_alignmask;
- return 0;
- }
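- /*
- * Allocate an instance with 'head' bytes in front of the crypto_instance
- * and 'tail' bytes of context behind it; returns the start of the whole
- * allocation, or an ERR_PTR on failure.
- */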
- static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
- unsigned int tail)
- {
- char *p;
- struct crypto_instance *inst;
- int err;
- p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
- if (!p)
- return ERR_PTR(-ENOMEM);
- inst = (void *)(p + head);
- err = cryptd_init_instance(inst, alg);
- if (err)
- goto out_free_inst;
- out:
- return p;
- out_free_inst:
- kfree(p);
- p = ERR_PTR(err);
- goto out;
- }
- static int cryptd_create_blkcipher(struct crypto_template *tmpl,
- struct rtattr **tb,
- struct cryptd_queue *queue)
- {
- struct cryptd_instance_ctx *ctx;
- struct crypto_instance *inst;
- struct crypto_alg *alg;
- u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
- int err;
- cryptd_check_internal(tb, &type, &mask);
- alg = crypto_get_attr_alg(tb, type, mask);
- if (IS_ERR(alg))
- return PTR_ERR(alg);
- inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
- ctx = crypto_instance_ctx(inst);
- ctx->queue = queue;
- err = crypto_init_spawn(&ctx->spawn, alg, inst,
- CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
- if (err)
- goto out_free_inst;
- type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.cra_flags = type;
- inst->alg.cra_type = &crypto_ablkcipher_type;
- inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
- inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
- inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;
- inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;
- inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);
- inst->alg.cra_init = cryptd_blkcipher_init_tfm;
- inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;
- inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
- inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
- inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;
- err = crypto_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_spawn(&ctx->spawn);
- out_free_inst:
- kfree(inst);
- }
- out_put_alg:
- crypto_mod_put(alg);
- return err;
- }
- static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
- {
- struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
- struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
- struct crypto_shash_spawn *spawn = &ictx->spawn;
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- struct crypto_shash *hash;
- hash = crypto_spawn_shash(spawn);
- if (IS_ERR(hash))
- return PTR_ERR(hash);
- ctx->child = hash;
- crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct cryptd_hash_request_ctx) +
- crypto_shash_descsize(hash));
- return 0;
- }
- static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
- {
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
- crypto_free_shash(ctx->child);
- }
- static int cryptd_hash_setkey(struct crypto_ahash *parent,
- const u8 *key, unsigned int keylen)
- {
- struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
- struct crypto_shash *child = ctx->child;
- int err;
- crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
- crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
- CRYPTO_TFM_REQ_MASK);
- err = crypto_shash_setkey(child, key, keylen);
- crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
- CRYPTO_TFM_RES_MASK);
- return err;
- }
- static int cryptd_hash_enqueue(struct ahash_request *req,
- crypto_completion_t compl)
- {
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cryptd_queue *queue =
- cryptd_get_queue(crypto_ahash_tfm(tfm));
- rctx->complete = req->base.complete;
- req->base.complete = compl;
- return cryptd_enqueue_request(queue, &req->base);
- }
- static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
- {
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_shash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
- if (unlikely(err == -EINPROGRESS))
- goto out;
- desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = crypto_shash_init(desc);
- req->base.complete = rctx->complete;
- out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
- static int cryptd_hash_init_enqueue(struct ahash_request *req)
- {
- return cryptd_hash_enqueue(req, cryptd_hash_init);
- }
- static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
- {
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx;
- rctx = ahash_request_ctx(req);
- if (unlikely(err == -EINPROGRESS))
- goto out;
- err = shash_ahash_update(req, &rctx->desc);
- req->base.complete = rctx->complete;
- out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
- static int cryptd_hash_update_enqueue(struct ahash_request *req)
- {
- return cryptd_hash_enqueue(req, cryptd_hash_update);
- }
- static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
- {
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- if (unlikely(err == -EINPROGRESS))
- goto out;
- err = crypto_shash_final(&rctx->desc, req->result);
- req->base.complete = rctx->complete;
- out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
- static int cryptd_hash_final_enqueue(struct ahash_request *req)
- {
- return cryptd_hash_enqueue(req, cryptd_hash_final);
- }
- static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
- {
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- if (unlikely(err == -EINPROGRESS))
- goto out;
- err = shash_ahash_finup(req, &rctx->desc);
- req->base.complete = rctx->complete;
- out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
- static int cryptd_hash_finup_enqueue(struct ahash_request *req)
- {
- return cryptd_hash_enqueue(req, cryptd_hash_finup);
- }
- static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
- {
- struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
- struct crypto_shash *child = ctx->child;
- struct ahash_request *req = ahash_request_cast(req_async);
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- struct shash_desc *desc = &rctx->desc;
- if (unlikely(err == -EINPROGRESS))
- goto out;
- desc->tfm = child;
- desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
- err = shash_ahash_digest(req, desc);
- req->base.complete = rctx->complete;
- out:
- local_bh_disable();
- rctx->complete(&req->base, err);
- local_bh_enable();
- }
- static int cryptd_hash_digest_enqueue(struct ahash_request *req)
- {
- return cryptd_hash_enqueue(req, cryptd_hash_digest);
- }
- static int cryptd_hash_export(struct ahash_request *req, void *out)
- {
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return crypto_shash_export(&rctx->desc, out);
- }
- static int cryptd_hash_import(struct ahash_request *req, const void *in)
- {
- struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
- struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
- struct shash_desc *desc = cryptd_shash_desc(req);
- desc->tfm = ctx->child;
- desc->flags = req->base.flags;
- return crypto_shash_import(desc, in);
- }
- static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
- struct cryptd_queue *queue)
- {
- struct hashd_instance_ctx *ctx;
- struct ahash_instance *inst;
- struct shash_alg *salg;
- struct crypto_alg *alg;
- u32 type = 0;
- u32 mask = 0;
- int err;
- cryptd_check_internal(tb, &type, &mask);
- salg = shash_attr_alg(tb[1], type, mask);
- if (IS_ERR(salg))
- return PTR_ERR(salg);
- alg = &salg->base;
- inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
- sizeof(*ctx));
- err = PTR_ERR(inst);
- if (IS_ERR(inst))
- goto out_put_alg;
- ctx = ahash_instance_ctx(inst);
- ctx->queue = queue;
- err = crypto_init_shash_spawn(&ctx->spawn, salg,
- ahash_crypto_instance(inst));
- if (err)
- goto out_free_inst;
- type = CRYPTO_ALG_ASYNC;
- if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
- type |= CRYPTO_ALG_INTERNAL;
- inst->alg.halg.base.cra_flags = type;
- inst->alg.halg.digestsize = salg->digestsize;
- inst->alg.halg.statesize = salg->statesize;
- inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);
- inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
- inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;
- inst->alg.init = cryptd_hash_init_enqueue;
- inst->alg.update = cryptd_hash_update_enqueue;
- inst->alg.final = cryptd_hash_final_enqueue;
- inst->alg.finup = cryptd_hash_finup_enqueue;
- inst->alg.export = cryptd_hash_export;
- inst->alg.import = cryptd_hash_import;
- if (crypto_shash_alg_has_setkey(salg))
- inst->alg.setkey = cryptd_hash_setkey;
- inst->alg.digest = cryptd_hash_digest_enqueue;
- err = ahash_register_instance(tmpl, inst);
- if (err) {
- crypto_drop_shash(&ctx->spawn);
- out_free_inst:
- kfree(inst);
- }
- out_put_alg:
- crypto_mod_put(alg);
- return err;
- }
- static int cryptd_aead_setkey(struct crypto_aead *parent,
- const u8 *key, unsigned int keylen)
- {
- struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
- struct crypto_aead *child = ctx->child;
- return crypto_aead_setkey(child, key, keylen);
- }
- static int cryptd_aead_setauthsize(struct crypto_aead *parent,
- unsigned int authsize)
- {
- struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
- struct crypto_aead *child = ctx->child;
- return crypto_aead_setauthsize(child, authsize);
- }
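- /*
- * Perform the deferred AEAD operation: switch the request over to the
- * child transform, run the child's encrypt/decrypt and complete the
- * request with softirqs disabled.
- */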
- static void cryptd_aead_crypt(struct aead_request *req,
- struct crypto_aead *child,
- int err,
- int (*crypt)(struct aead_request *req))
- {
- struct cryptd_aead_request_ctx *rctx;
- crypto_completion_t compl;
- rctx = aead_request_ctx(req);
- compl = rctx->complete;
- if (unlikely(err == -EINPROGRESS))
- goto out;
- aead_request_set_tfm(req, child);
- err = crypt(req);
- out:
- local_bh_disable();
- compl(&req->base, err);
- local_bh_enable();
- }
- static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
- {
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
- struct crypto_aead *child = ctx->child;
- struct aead_request *req;
- req = container_of(areq, struct aead_request, base);
- cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
- }
- static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
- {
- struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
- struct crypto_aead *child = ctx->child;
- struct aead_request *req;
- req = container_of(areq, struct aead_request, base);
- cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
- }
- static int cryptd_aead_enqueue(struct aead_request *req,
- crypto_completion_t compl)
- {
- struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
- struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));
- rctx->complete = req->base.complete;
- req->base.complete = compl;
- return cryptd_enqueue_request(queue, &req->base);
- }
- static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
- {
- return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
- }
- static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
- {
- return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
- }
- static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
- {
- struct aead_instance *inst = aead_alg_instance(tfm);
- struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
- struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
- struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
- struct crypto_aead *cipher;
- cipher = crypto_spawn_aead(spawn);
- if (IS_ERR(cipher))
- return PTR_ERR(cipher);
- ctx->child = cipher;
- crypto_aead_set_reqsize(
- tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
- crypto_aead_reqsize(cipher)));
- return 0;
- }
- static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
- {
- struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
- crypto_free_aead(ctx->child);
- }
- static int cryptd_create_aead(struct crypto_template *tmpl,
- struct rtattr **tb,
- struct cryptd_queue *queue)
- {
- struct aead_instance_ctx *ctx;
- struct aead_instance *inst;
- struct aead_alg *alg;
- const char *name;
- u32 type = 0;
- u32 mask = CRYPTO_ALG_ASYNC;
- int err;
- cryptd_check_internal(tb, &type, &mask);
- name = crypto_attr_alg_name(tb[1]);
- if (IS_ERR(name))
- return PTR_ERR(name);
- inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
- if (!inst)
- return -ENOMEM;
- ctx = aead_instance_ctx(inst);
- ctx->queue = queue;
- crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
- err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
- if (err)
- goto out_free_inst;
- alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
- err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
- if (err)
- goto out_drop_aead;
- inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
- (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
- inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);
- inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
- inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
- inst->alg.init = cryptd_aead_init_tfm;
- inst->alg.exit = cryptd_aead_exit_tfm;
- inst->alg.setkey = cryptd_aead_setkey;
- inst->alg.setauthsize = cryptd_aead_setauthsize;
- inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
- inst->alg.decrypt = cryptd_aead_decrypt_enqueue;
- err = aead_register_instance(tmpl, inst);
- if (err) {
- out_drop_aead:
- crypto_drop_aead(&ctx->aead_spawn);
- out_free_inst:
- kfree(inst);
- }
- return err;
- }
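- /* Single request queue shared by all cryptd instances. */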
- static struct cryptd_queue queue;
- static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
- {
- struct crypto_attr_type *algt;
- algt = crypto_get_attr_type(tb);
- if (IS_ERR(algt))
- return PTR_ERR(algt);
- switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_BLKCIPHER:
- return cryptd_create_blkcipher(tmpl, tb, &queue);
- case CRYPTO_ALG_TYPE_DIGEST:
- return cryptd_create_hash(tmpl, tb, &queue);
- case CRYPTO_ALG_TYPE_AEAD:
- return cryptd_create_aead(tmpl, tb, &queue);
- }
- return -EINVAL;
- }
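- /*
- * The instance context layout differs per algorithm type, so pick the
- * matching spawn to drop based on the instance's type flags before
- * freeing it.
- */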
- static void cryptd_free(struct crypto_instance *inst)
- {
- struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
- struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
- struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);
- switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
- case CRYPTO_ALG_TYPE_AHASH:
- crypto_drop_shash(&hctx->spawn);
- kfree(ahash_instance(inst));
- return;
- case CRYPTO_ALG_TYPE_AEAD:
- crypto_drop_aead(&aead_ctx->aead_spawn);
- kfree(aead_instance(inst));
- return;
- default:
- crypto_drop_spawn(&ctx->spawn);
- kfree(inst);
- }
- }
- static struct crypto_template cryptd_tmpl = {
- .name = "cryptd",
- .create = cryptd_create,
- .free = cryptd_free,
- .module = THIS_MODULE,
- };
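- /*
- * The cryptd_alloc_* helpers below wrap an existing algorithm in a
- * "cryptd(...)" instance and check that the resulting tfm was really
- * created by this module before handing it back to the caller.
- */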
- struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
- u32 type, u32 mask)
- {
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_tfm *tfm;
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- type &= ~(CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_GENIV);
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
- tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_tfm(tfm);
- return ERR_PTR(-EINVAL);
- }
- return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
- }
- EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);
- struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
- {
- struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
- return ctx->child;
- }
- EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);
- void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
- {
- crypto_free_ablkcipher(&tfm->base);
- }
- EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
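- /*
- * Illustrative sketch only (not part of this file): a caller that wants a
- * synchronous cipher executed asynchronously in process context could use
- * the API above roughly as follows; the algorithm name is a placeholder.
- *
- *	struct cryptd_ablkcipher *ctfm;
- *
- *	ctfm = cryptd_alloc_ablkcipher("__cbc-aes-example", 0, 0);
- *	if (IS_ERR(ctfm))
- *		return PTR_ERR(ctfm);
- *	... submit ablkcipher requests against &ctfm->base ...
- *	cryptd_free_ablkcipher(ctfm);
- */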
- struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
- u32 type, u32 mask)
- {
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_ahash *tfm;
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_ahash(tfm);
- return ERR_PTR(-EINVAL);
- }
- return __cryptd_ahash_cast(tfm);
- }
- EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
- struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
- {
- struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);
- return ctx->child;
- }
- EXPORT_SYMBOL_GPL(cryptd_ahash_child);
- struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
- {
- struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
- return &rctx->desc;
- }
- EXPORT_SYMBOL_GPL(cryptd_shash_desc);
- void cryptd_free_ahash(struct cryptd_ahash *tfm)
- {
- crypto_free_ahash(&tfm->base);
- }
- EXPORT_SYMBOL_GPL(cryptd_free_ahash);
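- /*
- * Illustrative sketch only (not part of this file): wrapping a shash in
- * cryptd looks much the same; the algorithm name is a placeholder.
- *
- *	struct cryptd_ahash *chash;
- *
- *	chash = cryptd_alloc_ahash("__ghash-example", 0, 0);
- *	if (IS_ERR(chash))
- *		return PTR_ERR(chash);
- *	... use &chash->base as a normal crypto_ahash; cryptd_ahash_child()
- *	    exposes the underlying shash for callers that also need a
- *	    synchronous path ...
- *	cryptd_free_ahash(chash);
- */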
- struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
- u32 type, u32 mask)
- {
- char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
- struct crypto_aead *tfm;
- if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
- "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
- return ERR_PTR(-EINVAL);
- tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
- if (IS_ERR(tfm))
- return ERR_CAST(tfm);
- if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
- crypto_free_aead(tfm);
- return ERR_PTR(-EINVAL);
- }
- return __cryptd_aead_cast(tfm);
- }
- EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
- struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
- {
- struct cryptd_aead_ctx *ctx;
- ctx = crypto_aead_ctx(&tfm->base);
- return ctx->child;
- }
- EXPORT_SYMBOL_GPL(cryptd_aead_child);
- void cryptd_free_aead(struct cryptd_aead *tfm)
- {
- crypto_free_aead(&tfm->base);
- }
- EXPORT_SYMBOL_GPL(cryptd_free_aead);
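- /*
- * Illustrative sketch only (not part of this file): the AEAD wrappers are
- * used the same way; the algorithm name is a placeholder.
- *
- *	struct cryptd_aead *caead;
- *
- *	caead = cryptd_alloc_aead("__gcm-aes-example", 0, 0);
- *	if (IS_ERR(caead))
- *		return PTR_ERR(caead);
- *	... use &caead->base, or cryptd_aead_child(caead) for a direct
- *	    synchronous call into the child ...
- *	cryptd_free_aead(caead);
- */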
- static int __init cryptd_init(void)
- {
- int err;
- err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
- if (err)
- return err;
- err = crypto_register_template(&cryptd_tmpl);
- if (err)
- cryptd_fini_queue(&queue);
- return err;
- }
- static void __exit cryptd_exit(void)
- {
- cryptd_fini_queue(&queue);
- crypto_unregister_template(&cryptd_tmpl);
- }
- subsys_initcall(cryptd_init);
- module_exit(cryptd_exit);
- MODULE_LICENSE("GPL");
- MODULE_DESCRIPTION("Software async crypto daemon");
- MODULE_ALIAS_CRYPTO("cryptd");