Linux/crypto/cryptd.c

/*
 * Software async crypto daemon.
 *
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * Added AEAD support to cryptd.
 *    Authors: Tadeusz Struk (tadeusz.struk@intel.com)
 *             Adrian Hoban <adrian.hoban@intel.com>
 *             Gabriele Paoloni <gabriele.paoloni@intel.com>
 *             Aidan O'Mahony (aidan.o.mahony@intel.com)
 *    Copyright (c) 2010, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#include <crypto/internal/hash.h>
#include <crypto/internal/aead.h>
#include <crypto/internal/skcipher.h>
#include <crypto/cryptd.h>
#include <crypto/crypto_wq.h>
#include <linux/atomic.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>

#define CRYPTD_MAX_CPU_QLEN 1000

struct cryptd_cpu_queue {
        struct crypto_queue queue;
        struct work_struct work;
};

struct cryptd_queue {
        struct cryptd_cpu_queue __percpu *cpu_queue;
};
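
/*
 * Design note: every possible CPU owns a private crypto_queue plus a
 * work_struct.  Requests enqueued on a CPU are serviced by a work item
 * queued on that same CPU, so the submission path needs no cross-CPU
 * locking; the get_cpu()/put_cpu() pair in cryptd_enqueue_request() is
 * enough to keep the queue stable while it is touched.
 */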

struct cryptd_instance_ctx {
        struct crypto_spawn spawn;
        struct cryptd_queue *queue;
};

struct skcipherd_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        struct cryptd_queue *queue;
};

struct hashd_instance_ctx {
        struct crypto_shash_spawn spawn;
        struct cryptd_queue *queue;
};

struct aead_instance_ctx {
        struct crypto_aead_spawn aead_spawn;
        struct cryptd_queue *queue;
};

struct cryptd_blkcipher_ctx {
        atomic_t refcnt;
        struct crypto_blkcipher *child;
};

struct cryptd_blkcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_skcipher_ctx {
        atomic_t refcnt;
        struct crypto_skcipher *child;
};

struct cryptd_skcipher_request_ctx {
        crypto_completion_t complete;
};

struct cryptd_hash_ctx {
        atomic_t refcnt;
        struct crypto_shash *child;
};

struct cryptd_hash_request_ctx {
        crypto_completion_t complete;
        struct shash_desc desc;
};

struct cryptd_aead_ctx {
        atomic_t refcnt;
        struct crypto_aead *child;
};

struct cryptd_aead_request_ctx {
        crypto_completion_t complete;
};

static void cryptd_queue_worker(struct work_struct *work);

static int cryptd_init_queue(struct cryptd_queue *queue,
                             unsigned int max_cpu_qlen)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        queue->cpu_queue = alloc_percpu(struct cryptd_cpu_queue);
        if (!queue->cpu_queue)
                return -ENOMEM;
        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                crypto_init_queue(&cpu_queue->queue, max_cpu_qlen);
                INIT_WORK(&cpu_queue->work, cryptd_queue_worker);
        }
        return 0;
}

static void cryptd_fini_queue(struct cryptd_queue *queue)
{
        int cpu;
        struct cryptd_cpu_queue *cpu_queue;

        for_each_possible_cpu(cpu) {
                cpu_queue = per_cpu_ptr(queue->cpu_queue, cpu);
                BUG_ON(cpu_queue->queue.qlen);
        }
        free_percpu(queue->cpu_queue);
}

static int cryptd_enqueue_request(struct cryptd_queue *queue,
                                  struct crypto_async_request *request)
{
        int cpu, err;
        struct cryptd_cpu_queue *cpu_queue;
        atomic_t *refcnt;
        bool may_backlog;

        cpu = get_cpu();
        cpu_queue = this_cpu_ptr(queue->cpu_queue);
        err = crypto_enqueue_request(&cpu_queue->queue, request);

        refcnt = crypto_tfm_ctx(request->tfm);
        may_backlog = request->flags & CRYPTO_TFM_REQ_MAY_BACKLOG;

        if (err == -EBUSY && !may_backlog)
                goto out_put_cpu;

        queue_work_on(cpu, kcrypto_wq, &cpu_queue->work);

        if (!atomic_read(refcnt))
                goto out_put_cpu;

        atomic_inc(refcnt);

out_put_cpu:
        put_cpu();

        return err;
}
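
/*
 * Reference-counting note: every cryptd tfm context has an atomic_t
 * refcnt as its first member, so crypto_tfm_ctx() can be treated as an
 * atomic_t pointer here.  A successfully queued request takes an extra
 * reference, and the matching completion path drops it, so the tfm is
 * freed only once both the user and the queue are done with it.  The
 * extra reference is taken only when refcounting is active: the
 * cryptd_alloc_* helpers initialise the count to 1, while tfms
 * instantiated directly leave it at 0.
 */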

/*
 * Called in workqueue context: perform one real encryption/decryption
 * step (via req->complete) and reschedule itself if there is more work
 * to do.
 */
static void cryptd_queue_worker(struct work_struct *work)
{
        struct cryptd_cpu_queue *cpu_queue;
        struct crypto_async_request *req, *backlog;

        cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
        /*
         * Only handle one request at a time to avoid hogging the crypto
         * workqueue.  preempt_disable/enable prevents us from being
         * preempted by cryptd_enqueue_request(); local_bh_disable/enable
         * prevents cryptd_enqueue_request() from running in a softirq
         * and touching the queue underneath us.
         */
        local_bh_disable();
        preempt_disable();
        backlog = crypto_get_backlog(&cpu_queue->queue);
        req = crypto_dequeue_request(&cpu_queue->queue);
        preempt_enable();
        local_bh_enable();

        if (!req)
                return;

        if (backlog)
                backlog->complete(backlog, -EINPROGRESS);
        req->complete(req, 0);

        if (cpu_queue->queue.qlen)
                queue_work(kcrypto_wq, &cpu_queue->work);
}

static inline struct cryptd_queue *cryptd_get_queue(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        return ictx->queue;
}

static inline void cryptd_check_internal(struct rtattr **tb, u32 *type,
                                         u32 *mask)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return;

        *type |= algt->type & CRYPTO_ALG_INTERNAL;
        *mask |= algt->mask & CRYPTO_ALG_INTERNAL;
}
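
/*
 * CRYPTO_ALG_INTERNAL marks algorithms that must not be selected by
 * general users (typically raw SIMD helpers that assume FPU context).
 * Propagating the caller's INTERNAL type/mask bits here lets cryptd
 * wrap such internal implementations while keeping the resulting
 * cryptd instance visible only where appropriate.
 */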

static int cryptd_blkcipher_setkey(struct crypto_ablkcipher *parent,
                                   const u8 *key, unsigned int keylen)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(parent);
        struct crypto_blkcipher *child = ctx->child;
        int err;

        crypto_blkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_blkcipher_set_flags(child, crypto_ablkcipher_get_flags(parent) &
                                          CRYPTO_TFM_REQ_MASK);
        err = crypto_blkcipher_setkey(child, key, keylen);
        crypto_ablkcipher_set_flags(parent, crypto_blkcipher_get_flags(child) &
                                            CRYPTO_TFM_RES_MASK);
        return err;
}

static void cryptd_blkcipher_crypt(struct ablkcipher_request *req,
                                   struct crypto_blkcipher *child,
                                   int err,
                                   int (*crypt)(struct blkcipher_desc *desc,
                                                struct scatterlist *dst,
                                                struct scatterlist *src,
                                                unsigned int len))
{
        struct cryptd_blkcipher_request_ctx *rctx;
        struct cryptd_blkcipher_ctx *ctx;
        struct crypto_ablkcipher *tfm;
        struct blkcipher_desc desc;
        int refcnt;

        rctx = ablkcipher_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc.tfm = child;
        desc.info = req->info;
        desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypt(&desc, req->dst, req->src, req->nbytes);

        req->base.complete = rctx->complete;

out:
        tfm = crypto_ablkcipher_reqtfm(req);
        ctx = crypto_ablkcipher_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ablkcipher(tfm);
}

static void cryptd_blkcipher_encrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->encrypt);
}

static void cryptd_blkcipher_decrypt(struct crypto_async_request *req, int err)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(req->tfm);
        struct crypto_blkcipher *child = ctx->child;

        cryptd_blkcipher_crypt(ablkcipher_request_cast(req), child, err,
                               crypto_blkcipher_crt(child)->decrypt);
}

static int cryptd_blkcipher_enqueue(struct ablkcipher_request *req,
                                    crypto_completion_t compl)
{
        struct cryptd_blkcipher_request_ctx *rctx = ablkcipher_request_ctx(req);
        struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_blkcipher_encrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_encrypt);
}

static int cryptd_blkcipher_decrypt_enqueue(struct ablkcipher_request *req)
{
        return cryptd_blkcipher_enqueue(req, cryptd_blkcipher_decrypt);
}

static int cryptd_blkcipher_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct cryptd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_spawn *spawn = &ictx->spawn;
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_blkcipher *cipher;

        cipher = crypto_spawn_blkcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        tfm->crt_ablkcipher.reqsize =
                sizeof(struct cryptd_blkcipher_request_ctx);
        return 0;
}

static void cryptd_blkcipher_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_blkcipher(ctx->child);
}

static int cryptd_init_instance(struct crypto_instance *inst,
                                struct crypto_alg *alg)
{
        if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)",
                     alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
                return -ENAMETOOLONG;

        memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);

        inst->alg.cra_priority = alg->cra_priority + 50;
        inst->alg.cra_blocksize = alg->cra_blocksize;
        inst->alg.cra_alignmask = alg->cra_alignmask;

        return 0;
}
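
/*
 * Naming and priority: a cryptd instance keeps the child's cra_name
 * (e.g. "cbc(aes)") but gets the driver name "cryptd(<child driver>)",
 * and a priority 50 above the child, so that once instantiated the
 * async wrapper wins algorithm lookups over the underlying synchronous
 * implementation.
 */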

static void *cryptd_alloc_instance(struct crypto_alg *alg, unsigned int head,
                                   unsigned int tail)
{
        char *p;
        struct crypto_instance *inst;
        int err;

        p = kzalloc(head + sizeof(*inst) + tail, GFP_KERNEL);
        if (!p)
                return ERR_PTR(-ENOMEM);

        inst = (void *)(p + head);

        err = cryptd_init_instance(inst, alg);
        if (err)
                goto out_free_inst;

out:
        return p;

out_free_inst:
        kfree(p);
        p = ERR_PTR(err);
        goto out;
}

static int cryptd_create_blkcipher(struct crypto_template *tmpl,
                                   struct rtattr **tb,
                                   struct cryptd_queue *queue)
{
        struct cryptd_instance_ctx *ctx;
        struct crypto_instance *inst;
        struct crypto_alg *alg;
        u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
        u32 mask = CRYPTO_ALG_TYPE_MASK;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        alg = crypto_get_attr_alg(tb, type, mask);
        if (IS_ERR(alg))
                return PTR_ERR(alg);

        inst = cryptd_alloc_instance(alg, 0, sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = crypto_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_spawn(&ctx->spawn, alg, inst,
                                CRYPTO_ALG_TYPE_MASK | CRYPTO_ALG_ASYNC);
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.cra_flags = type;
        inst->alg.cra_type = &crypto_ablkcipher_type;

        inst->alg.cra_ablkcipher.ivsize = alg->cra_blkcipher.ivsize;
        inst->alg.cra_ablkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
        inst->alg.cra_ablkcipher.max_keysize = alg->cra_blkcipher.max_keysize;

        inst->alg.cra_ablkcipher.geniv = alg->cra_blkcipher.geniv;

        inst->alg.cra_ctxsize = sizeof(struct cryptd_blkcipher_ctx);

        inst->alg.cra_init = cryptd_blkcipher_init_tfm;
        inst->alg.cra_exit = cryptd_blkcipher_exit_tfm;

        inst->alg.cra_ablkcipher.setkey = cryptd_blkcipher_setkey;
        inst->alg.cra_ablkcipher.encrypt = cryptd_blkcipher_encrypt_enqueue;
        inst->alg.cra_ablkcipher.decrypt = cryptd_blkcipher_decrypt_enqueue;

        err = crypto_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_spawn(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_skcipher_setkey(struct crypto_skcipher *parent,
                                  const u8 *key, unsigned int keylen)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child = ctx->child;
        int err;

        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);
        return err;
}

static void cryptd_skcipher_complete(struct skcipher_request *req, int err)
{
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(tfm);
}

static void cryptd_skcipher_encrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->child;
        SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_encrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}
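
/*
 * The child request lives on the worker's stack
 * (SKCIPHER_REQUEST_ON_STACK): the child is synchronous, so the
 * request never outlives this function, and skcipher_request_zero()
 * wipes any key-dependent state from the stack before returning.
 */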

static void cryptd_skcipher_decrypt(struct crypto_async_request *base,
                                    int err)
{
        struct skcipher_request *req = skcipher_request_cast(base);
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child = ctx->child;
        SKCIPHER_REQUEST_ON_STACK(subreq, child);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        skcipher_request_set_tfm(subreq, child);
        skcipher_request_set_callback(subreq, CRYPTO_TFM_REQ_MAY_SLEEP,
                                      NULL, NULL);
        skcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
                                   req->iv);

        err = crypto_skcipher_decrypt(subreq);
        skcipher_request_zero(subreq);

        req->base.complete = rctx->complete;

out:
        cryptd_skcipher_complete(req, err);
}

static int cryptd_skcipher_enqueue(struct skcipher_request *req,
                                   crypto_completion_t compl)
{
        struct cryptd_skcipher_request_ctx *rctx = skcipher_request_ctx(req);
        struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
        struct cryptd_queue *queue;

        queue = cryptd_get_queue(crypto_skcipher_tfm(tfm));
        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_skcipher_encrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_encrypt);
}

static int cryptd_skcipher_decrypt_enqueue(struct skcipher_request *req)
{
        return cryptd_skcipher_enqueue(req, cryptd_skcipher_decrypt);
}

static int cryptd_skcipher_init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct skcipherd_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct crypto_skcipher_spawn *spawn = &ictx->spawn;
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *cipher;

        cipher = crypto_spawn_skcipher(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_skcipher_set_reqsize(
                tfm, sizeof(struct cryptd_skcipher_request_ctx));
        return 0;
}

static void cryptd_skcipher_exit_tfm(struct crypto_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
}

static void cryptd_skcipher_free(struct skcipher_instance *inst)
{
        struct skcipherd_instance_ctx *ctx = skcipher_instance_ctx(inst);

        crypto_drop_skcipher(&ctx->spawn);
}

static int cryptd_create_skcipher(struct crypto_template *tmpl,
                                  struct rtattr **tb,
                                  struct cryptd_queue *queue)
{
        struct skcipherd_instance_ctx *ctx;
        struct skcipher_instance *inst;
        struct skcipher_alg *alg;
        const char *name;
        u32 type;
        u32 mask;
        int err;

        type = 0;
        mask = CRYPTO_ALG_ASYNC;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_skcipher_alg(&ctx->spawn);
        err = cryptd_init_instance(skcipher_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_skcipher;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);

        inst->alg.ivsize = crypto_skcipher_alg_ivsize(alg);
        inst->alg.chunksize = crypto_skcipher_alg_chunksize(alg);
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg);
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg);

        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_skcipher_ctx);

        inst->alg.init = cryptd_skcipher_init_tfm;
        inst->alg.exit = cryptd_skcipher_exit_tfm;

        inst->alg.setkey = cryptd_skcipher_setkey;
        inst->alg.encrypt = cryptd_skcipher_encrypt_enqueue;
        inst->alg.decrypt = cryptd_skcipher_decrypt_enqueue;

        inst->free = cryptd_skcipher_free;

        err = skcipher_register_instance(tmpl, inst);
        if (err) {
out_drop_skcipher:
                crypto_drop_skcipher(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static int cryptd_hash_init_tfm(struct crypto_tfm *tfm)
{
        struct crypto_instance *inst = crypto_tfm_alg_instance(tfm);
        struct hashd_instance_ctx *ictx = crypto_instance_ctx(inst);
        struct crypto_shash_spawn *spawn = &ictx->spawn;
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);
        struct crypto_shash *hash;

        hash = crypto_spawn_shash(spawn);
        if (IS_ERR(hash))
                return PTR_ERR(hash);

        ctx->child = hash;
        crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
                                 sizeof(struct cryptd_hash_request_ctx) +
                                 crypto_shash_descsize(hash));
        return 0;
}
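
/*
 * The request size covers cryptd_hash_request_ctx plus the child's
 * descriptor state (crypto_shash_descsize()), so the shash_desc
 * embedded in the request context carries the running hash state
 * across init/update/final calls without any extra allocation.
 */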

static void cryptd_hash_exit_tfm(struct crypto_tfm *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(tfm);

        crypto_free_shash(ctx->child);
}

static int cryptd_hash_setkey(struct crypto_ahash *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(parent);
        struct crypto_shash *child = ctx->child;
        int err;

        crypto_shash_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_shash_set_flags(child, crypto_ahash_get_flags(parent) &
                                      CRYPTO_TFM_REQ_MASK);
        err = crypto_shash_setkey(child, key, keylen);
        crypto_ahash_set_flags(parent, crypto_shash_get_flags(child) &
                                       CRYPTO_TFM_RES_MASK);
        return err;
}

static int cryptd_hash_enqueue(struct ahash_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_queue *queue =
                cryptd_get_queue(crypto_ahash_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;

        return cryptd_enqueue_request(queue, &req->base);
}

static void cryptd_hash_complete(struct ahash_request *req, int err)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        int refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        rctx->complete(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(tfm);
}

static void cryptd_hash_init(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = crypto_shash_init(desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_init_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_init);
}

static void cryptd_hash_update(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx;

        rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_update(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_update_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_update);
}

static void cryptd_hash_final(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = crypto_shash_final(&rctx->desc, req->result);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_final_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_final);
}

static void cryptd_hash_finup(struct crypto_async_request *req_async, int err)
{
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;

        err = shash_ahash_finup(req, &rctx->desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_finup_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_finup);
}

static void cryptd_hash_digest(struct crypto_async_request *req_async, int err)
{
        struct cryptd_hash_ctx *ctx = crypto_tfm_ctx(req_async->tfm);
        struct crypto_shash *child = ctx->child;
        struct ahash_request *req = ahash_request_cast(req_async);
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        struct shash_desc *desc = &rctx->desc;

        if (unlikely(err == -EINPROGRESS))
                goto out;

        desc->tfm = child;
        desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;

        err = shash_ahash_digest(req, desc);

        req->base.complete = rctx->complete;

out:
        cryptd_hash_complete(req, err);
}

static int cryptd_hash_digest_enqueue(struct ahash_request *req)
{
        return cryptd_hash_enqueue(req, cryptd_hash_digest);
}

static int cryptd_hash_export(struct ahash_request *req, void *out)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);

        return crypto_shash_export(&rctx->desc, out);
}

static int cryptd_hash_import(struct ahash_request *req, const void *in)
{
        struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(tfm);
        struct shash_desc *desc = cryptd_shash_desc(req);

        desc->tfm = ctx->child;
        desc->flags = req->base.flags;

        return crypto_shash_import(desc, in);
}

static int cryptd_create_hash(struct crypto_template *tmpl, struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct hashd_instance_ctx *ctx;
        struct ahash_instance *inst;
        struct shash_alg *salg;
        struct crypto_alg *alg;
        u32 type = 0;
        u32 mask = 0;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        salg = shash_attr_alg(tb[1], type, mask);
        if (IS_ERR(salg))
                return PTR_ERR(salg);

        alg = &salg->base;
        inst = cryptd_alloc_instance(alg, ahash_instance_headroom(),
                                     sizeof(*ctx));
        err = PTR_ERR(inst);
        if (IS_ERR(inst))
                goto out_put_alg;

        ctx = ahash_instance_ctx(inst);
        ctx->queue = queue;

        err = crypto_init_shash_spawn(&ctx->spawn, salg,
                                      ahash_crypto_instance(inst));
        if (err)
                goto out_free_inst;

        type = CRYPTO_ALG_ASYNC;
        if (alg->cra_flags & CRYPTO_ALG_INTERNAL)
                type |= CRYPTO_ALG_INTERNAL;
        inst->alg.halg.base.cra_flags = type;

        inst->alg.halg.digestsize = salg->digestsize;
        inst->alg.halg.statesize = salg->statesize;
        inst->alg.halg.base.cra_ctxsize = sizeof(struct cryptd_hash_ctx);

        inst->alg.halg.base.cra_init = cryptd_hash_init_tfm;
        inst->alg.halg.base.cra_exit = cryptd_hash_exit_tfm;

        inst->alg.init   = cryptd_hash_init_enqueue;
        inst->alg.update = cryptd_hash_update_enqueue;
        inst->alg.final  = cryptd_hash_final_enqueue;
        inst->alg.finup  = cryptd_hash_finup_enqueue;
        inst->alg.export = cryptd_hash_export;
        inst->alg.import = cryptd_hash_import;
        inst->alg.setkey = cryptd_hash_setkey;
        inst->alg.digest = cryptd_hash_digest_enqueue;

        err = ahash_register_instance(tmpl, inst);
        if (err) {
                crypto_drop_shash(&ctx->spawn);
out_free_inst:
                kfree(inst);
        }

out_put_alg:
        crypto_mod_put(alg);
        return err;
}

static int cryptd_aead_setkey(struct crypto_aead *parent,
                              const u8 *key, unsigned int keylen)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setkey(child, key, keylen);
}

static int cryptd_aead_setauthsize(struct crypto_aead *parent,
                                   unsigned int authsize)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(parent);
        struct crypto_aead *child = ctx->child;

        return crypto_aead_setauthsize(child, authsize);
}

static void cryptd_aead_crypt(struct aead_request *req,
                        struct crypto_aead *child,
                        int err,
                        int (*crypt)(struct aead_request *req))
{
        struct cryptd_aead_request_ctx *rctx;
        struct cryptd_aead_ctx *ctx;
        crypto_completion_t compl;
        struct crypto_aead *tfm;
        int refcnt;

        rctx = aead_request_ctx(req);
        compl = rctx->complete;

        tfm = crypto_aead_reqtfm(req);

        if (unlikely(err == -EINPROGRESS))
                goto out;
        aead_request_set_tfm(req, child);
        err = crypt(req);

out:
        ctx = crypto_aead_ctx(tfm);
        refcnt = atomic_read(&ctx->refcnt);

        local_bh_disable();
        compl(&req->base, err);
        local_bh_enable();

        if (err != -EINPROGRESS && refcnt && atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(tfm);
}
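
/*
 * Note the ordering above: the completion callback and the parent tfm
 * are saved before the request is re-targeted at the child and
 * completed, because the completion callback may free the request and
 * the final reference drop may free the tfm; neither may be touched
 * afterwards.
 */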

static void cryptd_aead_encrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->encrypt);
}

static void cryptd_aead_decrypt(struct crypto_async_request *areq, int err)
{
        struct cryptd_aead_ctx *ctx = crypto_tfm_ctx(areq->tfm);
        struct crypto_aead *child = ctx->child;
        struct aead_request *req;

        req = container_of(areq, struct aead_request, base);
        cryptd_aead_crypt(req, child, err, crypto_aead_alg(child)->decrypt);
}

static int cryptd_aead_enqueue(struct aead_request *req,
                               crypto_completion_t compl)
{
        struct cryptd_aead_request_ctx *rctx = aead_request_ctx(req);
        struct crypto_aead *tfm = crypto_aead_reqtfm(req);
        struct cryptd_queue *queue = cryptd_get_queue(crypto_aead_tfm(tfm));

        rctx->complete = req->base.complete;
        req->base.complete = compl;
        return cryptd_enqueue_request(queue, &req->base);
}

static int cryptd_aead_encrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_encrypt);
}

static int cryptd_aead_decrypt_enqueue(struct aead_request *req)
{
        return cryptd_aead_enqueue(req, cryptd_aead_decrypt);
}

static int cryptd_aead_init_tfm(struct crypto_aead *tfm)
{
        struct aead_instance *inst = aead_alg_instance(tfm);
        struct aead_instance_ctx *ictx = aead_instance_ctx(inst);
        struct crypto_aead_spawn *spawn = &ictx->aead_spawn;
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        struct crypto_aead *cipher;

        cipher = crypto_spawn_aead(spawn);
        if (IS_ERR(cipher))
                return PTR_ERR(cipher);

        ctx->child = cipher;
        crypto_aead_set_reqsize(
                tfm, max((unsigned)sizeof(struct cryptd_aead_request_ctx),
                         crypto_aead_reqsize(cipher)));
        return 0;
}
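
/*
 * The request size is the maximum of our own context and the child's
 * reqsize: unlike the skcipher path, cryptd_aead_crypt() reuses the
 * caller's aead_request for the child (via aead_request_set_tfm()), so
 * the one allocation must be large enough for whichever context is
 * bigger.
 */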

static void cryptd_aead_exit_tfm(struct crypto_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(tfm);
        crypto_free_aead(ctx->child);
}

static int cryptd_create_aead(struct crypto_template *tmpl,
                              struct rtattr **tb,
                              struct cryptd_queue *queue)
{
        struct aead_instance_ctx *ctx;
        struct aead_instance *inst;
        struct aead_alg *alg;
        const char *name;
        u32 type = 0;
        u32 mask = CRYPTO_ALG_ASYNC;
        int err;

        cryptd_check_internal(tb, &type, &mask);

        name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(name))
                return PTR_ERR(name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = aead_instance_ctx(inst);
        ctx->queue = queue;

        crypto_set_aead_spawn(&ctx->aead_spawn, aead_crypto_instance(inst));
        err = crypto_grab_aead(&ctx->aead_spawn, name, type, mask);
        if (err)
                goto out_free_inst;

        alg = crypto_spawn_aead_alg(&ctx->aead_spawn);
        err = cryptd_init_instance(aead_crypto_instance(inst), &alg->base);
        if (err)
                goto out_drop_aead;

        inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC |
                                   (alg->base.cra_flags & CRYPTO_ALG_INTERNAL);
        inst->alg.base.cra_ctxsize = sizeof(struct cryptd_aead_ctx);

        inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
        inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);

        inst->alg.init = cryptd_aead_init_tfm;
        inst->alg.exit = cryptd_aead_exit_tfm;
        inst->alg.setkey = cryptd_aead_setkey;
        inst->alg.setauthsize = cryptd_aead_setauthsize;
        inst->alg.encrypt = cryptd_aead_encrypt_enqueue;
        inst->alg.decrypt = cryptd_aead_decrypt_enqueue;

        err = aead_register_instance(tmpl, inst);
        if (err) {
out_drop_aead:
                crypto_drop_aead(&ctx->aead_spawn);
out_free_inst:
                kfree(inst);
        }
        return err;
}

static struct cryptd_queue queue;

static int cryptd_create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct crypto_attr_type *algt;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_BLKCIPHER:
                if ((algt->type & CRYPTO_ALG_TYPE_MASK) ==
                    CRYPTO_ALG_TYPE_BLKCIPHER)
                        return cryptd_create_blkcipher(tmpl, tb, &queue);

                return cryptd_create_skcipher(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_DIGEST:
                return cryptd_create_hash(tmpl, tb, &queue);
        case CRYPTO_ALG_TYPE_AEAD:
                return cryptd_create_aead(tmpl, tb, &queue);
        }

        return -EINVAL;
}
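
/*
 * Dispatch note: the masked type for the legacy blkcipher family and
 * the newer skcipher interface lands in the same switch case, so the
 * first case re-checks the exact requested type: a request for the
 * legacy blkcipher type takes the ablkcipher-based path, anything else
 * the skcipher-based one.
 */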

static void cryptd_free(struct crypto_instance *inst)
{
        struct cryptd_instance_ctx *ctx = crypto_instance_ctx(inst);
        struct hashd_instance_ctx *hctx = crypto_instance_ctx(inst);
        struct aead_instance_ctx *aead_ctx = crypto_instance_ctx(inst);

        switch (inst->alg.cra_flags & CRYPTO_ALG_TYPE_MASK) {
        case CRYPTO_ALG_TYPE_AHASH:
                crypto_drop_shash(&hctx->spawn);
                kfree(ahash_instance(inst));
                return;
        case CRYPTO_ALG_TYPE_AEAD:
                crypto_drop_aead(&aead_ctx->aead_spawn);
                kfree(aead_instance(inst));
                return;
        default:
                crypto_drop_spawn(&ctx->spawn);
                kfree(inst);
        }
}

static struct crypto_template cryptd_tmpl = {
        .name = "cryptd",
        .create = cryptd_create,
        .free = cryptd_free,
        .module = THIS_MODULE,
};

struct cryptd_ablkcipher *cryptd_alloc_ablkcipher(const char *alg_name,
                                                  u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_blkcipher_ctx *ctx;
        struct crypto_tfm *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        type = crypto_skcipher_type(type);
        mask &= ~CRYPTO_ALG_TYPE_MASK;
        mask |= (CRYPTO_ALG_GENIV | CRYPTO_ALG_TYPE_BLKCIPHER_MASK);
        tfm = crypto_alloc_base(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_tfm(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_tfm_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ablkcipher_cast(__crypto_ablkcipher_cast(tfm));
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ablkcipher);

struct crypto_blkcipher *cryptd_ablkcipher_child(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_child);

bool cryptd_ablkcipher_queued(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ablkcipher_queued);

void cryptd_free_ablkcipher(struct cryptd_ablkcipher *tfm)
{
        struct cryptd_blkcipher_ctx *ctx = crypto_ablkcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ablkcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ablkcipher);
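
/*
 * Lifecycle for the cryptd_alloc_* helpers: the refcount starts at 1
 * (the caller's reference) and each request in flight holds one more
 * (see cryptd_enqueue_request()).  So *_queued() returning refcnt - 1
 * means "requests still pending", and cryptd_free_*() destroys the tfm
 * only once the last queued request has completed.  The cra_module
 * check in each allocator guarantees that the tfm really came from
 * this template before its context is reinterpreted.
 */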

struct cryptd_skcipher *cryptd_alloc_skcipher(const char *alg_name,
                                              u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_skcipher_ctx *ctx;
        struct crypto_skcipher *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);

        tfm = crypto_alloc_skcipher(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);

        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_skcipher(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_skcipher_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return container_of(tfm, struct cryptd_skcipher, base);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_skcipher);

struct crypto_skcipher *cryptd_skcipher_child(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_child);

bool cryptd_skcipher_queued(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_skcipher_queued);

void cryptd_free_skcipher(struct cryptd_skcipher *tfm)
{
        struct cryptd_skcipher_ctx *ctx = crypto_skcipher_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_skcipher(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_skcipher);

struct cryptd_ahash *cryptd_alloc_ahash(const char *alg_name,
                                        u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_hash_ctx *ctx;
        struct crypto_ahash *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_ahash(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_ahash(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_ahash_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_ahash_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_ahash);
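
/*
 * Usage sketch (illustrative only; error handling is trimmed and the
 * "sha1" algorithm name is just an example): a driver that wants an
 * async view of a synchronous hash can do roughly
 *
 *	struct cryptd_ahash *chash;
 *
 *	chash = cryptd_alloc_ahash("sha1", CRYPTO_ALG_INTERNAL,
 *				   CRYPTO_ALG_INTERNAL);
 *	if (IS_ERR(chash))
 *		return PTR_ERR(chash);
 *	...
 *	cryptd_free_ahash(chash);
 *
 * and use cryptd_ahash_child() / cryptd_shash_desc() below to drive
 * the synchronous child directly when the current context allows it.
 */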

struct crypto_shash *cryptd_ahash_child(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_child);

struct shash_desc *cryptd_shash_desc(struct ahash_request *req)
{
        struct cryptd_hash_request_ctx *rctx = ahash_request_ctx(req);
        return &rctx->desc;
}
EXPORT_SYMBOL_GPL(cryptd_shash_desc);

bool cryptd_ahash_queued(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_ahash_queued);

void cryptd_free_ahash(struct cryptd_ahash *tfm)
{
        struct cryptd_hash_ctx *ctx = crypto_ahash_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_ahash(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_ahash);

struct cryptd_aead *cryptd_alloc_aead(const char *alg_name,
                                      u32 type, u32 mask)
{
        char cryptd_alg_name[CRYPTO_MAX_ALG_NAME];
        struct cryptd_aead_ctx *ctx;
        struct crypto_aead *tfm;

        if (snprintf(cryptd_alg_name, CRYPTO_MAX_ALG_NAME,
                     "cryptd(%s)", alg_name) >= CRYPTO_MAX_ALG_NAME)
                return ERR_PTR(-EINVAL);
        tfm = crypto_alloc_aead(cryptd_alg_name, type, mask);
        if (IS_ERR(tfm))
                return ERR_CAST(tfm);
        if (tfm->base.__crt_alg->cra_module != THIS_MODULE) {
                crypto_free_aead(tfm);
                return ERR_PTR(-EINVAL);
        }

        ctx = crypto_aead_ctx(tfm);
        atomic_set(&ctx->refcnt, 1);

        return __cryptd_aead_cast(tfm);
}
EXPORT_SYMBOL_GPL(cryptd_alloc_aead);
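
/*
 * Typical consumer (a sketch, not tied to one driver): SIMD AEAD
 * implementations allocate a cryptd instance around their internal
 * (CRYPTO_ALG_INTERNAL) algorithm, then use cryptd_aead_child() to run
 * the inner tfm directly whenever the SIMD unit is usable, falling
 * back to the queued async path when it is not.
 */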

struct crypto_aead *cryptd_aead_child(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx;
        ctx = crypto_aead_ctx(&tfm->base);
        return ctx->child;
}
EXPORT_SYMBOL_GPL(cryptd_aead_child);

bool cryptd_aead_queued(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        return atomic_read(&ctx->refcnt) - 1;
}
EXPORT_SYMBOL_GPL(cryptd_aead_queued);

void cryptd_free_aead(struct cryptd_aead *tfm)
{
        struct cryptd_aead_ctx *ctx = crypto_aead_ctx(&tfm->base);

        if (atomic_dec_and_test(&ctx->refcnt))
                crypto_free_aead(&tfm->base);
}
EXPORT_SYMBOL_GPL(cryptd_free_aead);

static int __init cryptd_init(void)
{
        int err;

        err = cryptd_init_queue(&queue, CRYPTD_MAX_CPU_QLEN);
        if (err)
                return err;

        err = crypto_register_template(&cryptd_tmpl);
        if (err)
                cryptd_fini_queue(&queue);

        return err;
}

static void __exit cryptd_exit(void)
{
        cryptd_fini_queue(&queue);
        crypto_unregister_template(&cryptd_tmpl);
}

subsys_initcall(cryptd_init);
module_exit(cryptd_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Software async crypto daemon");
MODULE_ALIAS_CRYPTO("cryptd");
