
crypto/pcrypt.c

  1 /*
  2  * pcrypt - Parallel crypto wrapper.
  3  *
  4  * Copyright (C) 2009 secunet Security Networks AG
  5  * Copyright (C) 2009 Steffen Klassert <steffen.klassert@secunet.com>
  6  *
  7  * This program is free software; you can redistribute it and/or modify it
  8  * under the terms and conditions of the GNU General Public License,
  9  * version 2, as published by the Free Software Foundation.
 10  *
 11  * This program is distributed in the hope it will be useful, but WITHOUT
 12  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 13  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 14  * more details.
 15  *
 16  * You should have received a copy of the GNU General Public License along with
 17  * this program; if not, write to the Free Software Foundation, Inc.,
 18  * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 19  */
 20 
 21 #include <crypto/algapi.h>
 22 #include <crypto/internal/aead.h>
 23 #include <linux/atomic.h>
 24 #include <linux/err.h>
 25 #include <linux/init.h>
 26 #include <linux/module.h>
 27 #include <linux/slab.h>
 28 #include <linux/notifier.h>
 29 #include <linux/kobject.h>
 30 #include <linux/cpu.h>
 31 #include <crypto/pcrypt.h>
 32 
 33 struct padata_pcrypt {
 34         struct padata_instance *pinst;
 35         struct workqueue_struct *wq;
 36 
 37         /*
  38          * Cpumask for callback CPUs. It should be equal to the
  39          * serial cpumask of the corresponding padata instance,
  40          * so it is updated when padata notifies us about a serial
  41          * cpumask change.
  42          *
  43          * cb_cpumask is protected by RCU. This prevents us from
  44          * using cpumask_var_t directly because the actual type of
  45          * cpumask_var_t depends on the kernel configuration (in
  46          * particular on CONFIG_CPUMASK_OFFSTACK). Depending on the
  47          * configuration, cpumask_var_t may be either a pointer to the
  48          * struct cpumask or a variable allocated on the stack. Thus we
  49          * cannot safely use cpumask_var_t with RCU operations such as
  50          * rcu_assign_pointer or rcu_dereference, so cpumask_var_t is
  51          * wrapped in struct pcrypt_cpumask, which makes it usable with RCU.
 52          */
 53         struct pcrypt_cpumask {
 54                 cpumask_var_t mask;
 55         } *cb_cpumask;
 56         struct notifier_block nblock;
 57 };
 58 
 59 static struct padata_pcrypt pencrypt;
 60 static struct padata_pcrypt pdecrypt;
 61 static struct kset           *pcrypt_kset;
 62 
 63 struct pcrypt_instance_ctx {
 64         struct crypto_aead_spawn spawn;
 65         atomic_t tfm_count;
 66 };
 67 
 68 struct pcrypt_aead_ctx {
 69         struct crypto_aead *child;
 70         unsigned int cb_cpu;
 71 };
 72 
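     /*
      * Select the callback CPU for this request and submit it to padata.
      * The caller's preferred CPU is kept if it is still in the callback
      * cpumask; otherwise a CPU is picked from the mask based on the
      * preferred CPU's index and written back, so follow-up requests from
      * the same transform keep completing on the same CPU.
      */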
 73 static int pcrypt_do_parallel(struct padata_priv *padata, unsigned int *cb_cpu,
 74                               struct padata_pcrypt *pcrypt)
 75 {
 76         unsigned int cpu_index, cpu, i;
 77         struct pcrypt_cpumask *cpumask;
 78 
 79         cpu = *cb_cpu;
 80 
 81         rcu_read_lock_bh();
 82         cpumask = rcu_dereference_bh(pcrypt->cb_cpumask);
  83         if (cpumask_test_cpu(cpu, cpumask->mask))
  84                 goto out;
  85 
  86         if (!cpumask_weight(cpumask->mask))
  87                 goto out;
 88 
 89         cpu_index = cpu % cpumask_weight(cpumask->mask);
 90 
 91         cpu = cpumask_first(cpumask->mask);
 92         for (i = 0; i < cpu_index; i++)
 93                 cpu = cpumask_next(cpu, cpumask->mask);
 94 
 95         *cb_cpu = cpu;
 96 
 97 out:
 98         rcu_read_unlock_bh();
 99         return padata_do_parallel(pcrypt->pinst, padata, cpu);
100 }
101 
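     /* setkey and setauthsize are forwarded unchanged to the child AEAD. */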
102 static int pcrypt_aead_setkey(struct crypto_aead *parent,
103                               const u8 *key, unsigned int keylen)
104 {
105         struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
106 
107         return crypto_aead_setkey(ctx->child, key, keylen);
108 }
109 
110 static int pcrypt_aead_setauthsize(struct crypto_aead *parent,
111                                    unsigned int authsize)
112 {
113         struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(parent);
114 
115         return crypto_aead_setauthsize(ctx->child, authsize);
116 }
117 
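     /*
      * Serial callback, run by padata in the original submission order:
      * complete the user's request with the status saved in padata->info.
      */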
118 static void pcrypt_aead_serial(struct padata_priv *padata)
119 {
120         struct pcrypt_request *preq = pcrypt_padata_request(padata);
121         struct aead_request *req = pcrypt_request_ctx(preq);
122 
123         aead_request_complete(req->base.data, padata->info);
124 }
125 
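     /*
      * Asynchronous completion of the child request: save its status in
      * padata->info, drop the MAY_SLEEP flag (we may now be running in a
      * context that must not sleep) and hand the request back to padata
      * for serialization.
      */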
126 static void pcrypt_aead_done(struct crypto_async_request *areq, int err)
127 {
128         struct aead_request *req = areq->data;
129         struct pcrypt_request *preq = aead_request_ctx(req);
130         struct padata_priv *padata = pcrypt_request_padata(preq);
131 
132         padata->info = err;
133         req->base.flags &= ~CRYPTO_TFM_REQ_MAY_SLEEP;
134 
135         padata_do_serial(padata);
136 }
137 
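     /*
      * Parallel worker, run by padata on the parallel CPU: perform the
      * actual child encryption.  A synchronous result is serialized right
      * away; -EINPROGRESS means pcrypt_aead_done() will do it later.
      */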
138 static void pcrypt_aead_enc(struct padata_priv *padata)
139 {
140         struct pcrypt_request *preq = pcrypt_padata_request(padata);
141         struct aead_request *req = pcrypt_request_ctx(preq);
142 
143         padata->info = crypto_aead_encrypt(req);
144 
145         if (padata->info == -EINPROGRESS)
146                 return;
147 
148         padata_do_serial(padata);
149 }
150 
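     /*
      * Encryption entry point: build a child request (preceded by the
      * padata bookkeeping) inside the request context, mirror the caller's
      * parameters onto it and dispatch it to the pencrypt instance.  On
      * successful submission the request proceeds asynchronously, hence
      * -EINPROGRESS is returned.
      */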
151 static int pcrypt_aead_encrypt(struct aead_request *req)
152 {
153         int err;
154         struct pcrypt_request *preq = aead_request_ctx(req);
155         struct aead_request *creq = pcrypt_request_ctx(preq);
156         struct padata_priv *padata = pcrypt_request_padata(preq);
157         struct crypto_aead *aead = crypto_aead_reqtfm(req);
158         struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
159         u32 flags = aead_request_flags(req);
160 
161         memset(padata, 0, sizeof(struct padata_priv));
162 
163         padata->parallel = pcrypt_aead_enc;
164         padata->serial = pcrypt_aead_serial;
165 
166         aead_request_set_tfm(creq, ctx->child);
167         aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
168                                   pcrypt_aead_done, req);
169         aead_request_set_crypt(creq, req->src, req->dst,
170                                req->cryptlen, req->iv);
171         aead_request_set_ad(creq, req->assoclen);
172 
173         err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pencrypt);
174         if (!err)
175                 return -EINPROGRESS;
176 
177         return err;
178 }
179 
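     /*
      * The decryption path mirrors encryption: pcrypt_aead_dec() is the
      * parallel worker and pcrypt_aead_decrypt() the entry point, except
      * that requests are dispatched to the pdecrypt instance.
      */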
180 static void pcrypt_aead_dec(struct padata_priv *padata)
181 {
182         struct pcrypt_request *preq = pcrypt_padata_request(padata);
183         struct aead_request *req = pcrypt_request_ctx(preq);
184 
185         padata->info = crypto_aead_decrypt(req);
186 
187         if (padata->info == -EINPROGRESS)
188                 return;
189 
190         padata_do_serial(padata);
191 }
192 
193 static int pcrypt_aead_decrypt(struct aead_request *req)
194 {
195         int err;
196         struct pcrypt_request *preq = aead_request_ctx(req);
197         struct aead_request *creq = pcrypt_request_ctx(preq);
198         struct padata_priv *padata = pcrypt_request_padata(preq);
199         struct crypto_aead *aead = crypto_aead_reqtfm(req);
200         struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(aead);
201         u32 flags = aead_request_flags(req);
202 
203         memset(padata, 0, sizeof(struct padata_priv));
204 
205         padata->parallel = pcrypt_aead_dec;
206         padata->serial = pcrypt_aead_serial;
207 
208         aead_request_set_tfm(creq, ctx->child);
209         aead_request_set_callback(creq, flags & ~CRYPTO_TFM_REQ_MAY_SLEEP,
210                                   pcrypt_aead_done, req);
211         aead_request_set_crypt(creq, req->src, req->dst,
212                                req->cryptlen, req->iv);
213         aead_request_set_ad(creq, req->assoclen);
214 
215         err = pcrypt_do_parallel(padata, &ctx->cb_cpu, &pdecrypt);
216         if (!err)
217                 return -EINPROGRESS;
218 
219         return err;
220 }
221 
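     /*
      * Transform init: assign each new tfm a callback CPU round-robin
      * across the online CPUs (via tfm_count), instantiate the child AEAD
      * and size the request context to hold the pcrypt/padata state plus
      * the child's own request.
      */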
222 static int pcrypt_aead_init_tfm(struct crypto_aead *tfm)
223 {
224         int cpu, cpu_index;
225         struct aead_instance *inst = aead_alg_instance(tfm);
226         struct pcrypt_instance_ctx *ictx = aead_instance_ctx(inst);
227         struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
228         struct crypto_aead *cipher;
229 
230         cpu_index = (unsigned int)atomic_inc_return(&ictx->tfm_count) %
231                     cpumask_weight(cpu_online_mask);
232 
233         ctx->cb_cpu = cpumask_first(cpu_online_mask);
234         for (cpu = 0; cpu < cpu_index; cpu++)
235                 ctx->cb_cpu = cpumask_next(ctx->cb_cpu, cpu_online_mask);
236 
237         cipher = crypto_spawn_aead(&ictx->spawn);
238 
239         if (IS_ERR(cipher))
240                 return PTR_ERR(cipher);
241 
242         ctx->child = cipher;
243         crypto_aead_set_reqsize(tfm, sizeof(struct pcrypt_request) +
244                                      sizeof(struct aead_request) +
245                                      crypto_aead_reqsize(cipher));
246 
247         return 0;
248 }
249 
250 static void pcrypt_aead_exit_tfm(struct crypto_aead *tfm)
251 {
252         struct pcrypt_aead_ctx *ctx = crypto_aead_ctx(tfm);
253 
254         crypto_free_aead(ctx->child);
255 }
256 
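     /*
      * Fill in the common algorithm fields: the driver name becomes
      * "pcrypt(<child driver>)", the cra_name stays that of the child, and
      * the priority is raised by 100 so the parallel instance is preferred
      * over the underlying implementation during algorithm lookup.
      */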
257 static int pcrypt_init_instance(struct crypto_instance *inst,
258                                 struct crypto_alg *alg)
259 {
260         if (snprintf(inst->alg.cra_driver_name, CRYPTO_MAX_ALG_NAME,
261                      "pcrypt(%s)", alg->cra_driver_name) >= CRYPTO_MAX_ALG_NAME)
262                 return -ENAMETOOLONG;
263 
264         memcpy(inst->alg.cra_name, alg->cra_name, CRYPTO_MAX_ALG_NAME);
265 
266         inst->alg.cra_priority = alg->cra_priority + 100;
267         inst->alg.cra_blocksize = alg->cra_blocksize;
268         inst->alg.cra_alignmask = alg->cra_alignmask;
269 
270         return 0;
271 }
272 
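     /*
      * Instantiate "pcrypt(<aead>)": grab the AEAD algorithm named by the
      * template parameter, copy its parameters and hook up the pcrypt
      * operations.  The resulting instance is always marked ASYNC.
      */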
273 static int pcrypt_create_aead(struct crypto_template *tmpl, struct rtattr **tb,
274                               u32 type, u32 mask)
275 {
276         struct pcrypt_instance_ctx *ctx;
277         struct crypto_attr_type *algt;
278         struct aead_instance *inst;
279         struct aead_alg *alg;
280         const char *name;
281         int err;
282 
283         algt = crypto_get_attr_type(tb);
284         if (IS_ERR(algt))
285                 return PTR_ERR(algt);
286 
287         name = crypto_attr_alg_name(tb[1]);
288         if (IS_ERR(name))
289                 return PTR_ERR(name);
290 
291         inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
292         if (!inst)
293                 return -ENOMEM;
294 
295         ctx = aead_instance_ctx(inst);
296         crypto_set_aead_spawn(&ctx->spawn, aead_crypto_instance(inst));
297 
298         err = crypto_grab_aead(&ctx->spawn, name, 0, 0);
299         if (err)
300                 goto out_free_inst;
301 
302         alg = crypto_spawn_aead_alg(&ctx->spawn);
303         err = pcrypt_init_instance(aead_crypto_instance(inst), &alg->base);
304         if (err)
305                 goto out_drop_aead;
306 
307         inst->alg.base.cra_flags = CRYPTO_ALG_ASYNC;
308 
309         inst->alg.ivsize = crypto_aead_alg_ivsize(alg);
310         inst->alg.maxauthsize = crypto_aead_alg_maxauthsize(alg);
311 
312         inst->alg.base.cra_ctxsize = sizeof(struct pcrypt_aead_ctx);
313 
314         inst->alg.init = pcrypt_aead_init_tfm;
315         inst->alg.exit = pcrypt_aead_exit_tfm;
316 
317         inst->alg.setkey = pcrypt_aead_setkey;
318         inst->alg.setauthsize = pcrypt_aead_setauthsize;
319         inst->alg.encrypt = pcrypt_aead_encrypt;
320         inst->alg.decrypt = pcrypt_aead_decrypt;
321 
322         err = aead_register_instance(tmpl, inst);
323         if (err)
324                 goto out_drop_aead;
325 
326 out:
327         return err;
328 
329 out_drop_aead:
330         crypto_drop_aead(&ctx->spawn);
331 out_free_inst:
332         kfree(inst);
333         goto out;
334 }
335 
336 static int pcrypt_create(struct crypto_template *tmpl, struct rtattr **tb)
337 {
338         struct crypto_attr_type *algt;
339 
340         algt = crypto_get_attr_type(tb);
341         if (IS_ERR(algt))
342                 return PTR_ERR(algt);
343 
344         switch (algt->type & algt->mask & CRYPTO_ALG_TYPE_MASK) {
345         case CRYPTO_ALG_TYPE_AEAD:
346                 return pcrypt_create_aead(tmpl, tb, algt->type, algt->mask);
347         }
348 
349         return -EINVAL;
350 }
351 
352 static void pcrypt_free(struct crypto_instance *inst)
353 {
354         struct pcrypt_instance_ctx *ctx = crypto_instance_ctx(inst);
355 
356         crypto_drop_aead(&ctx->spawn);
357         kfree(inst);
358 }
359 
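     /*
      * padata notifier: when the serial cpumask of the padata instance
      * changes, allocate a new pcrypt_cpumask, copy the new serial cpumask
      * into it and publish it with rcu_assign_pointer(); the old mask is
      * freed once readers are done (synchronize_rcu_bh()).
      */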
360 static int pcrypt_cpumask_change_notify(struct notifier_block *self,
361                                         unsigned long val, void *data)
362 {
363         struct padata_pcrypt *pcrypt;
364         struct pcrypt_cpumask *new_mask, *old_mask;
365         struct padata_cpumask *cpumask = (struct padata_cpumask *)data;
366 
367         if (!(val & PADATA_CPU_SERIAL))
368                 return 0;
369 
370         pcrypt = container_of(self, struct padata_pcrypt, nblock);
371         new_mask = kmalloc(sizeof(*new_mask), GFP_KERNEL);
372         if (!new_mask)
373                 return -ENOMEM;
374         if (!alloc_cpumask_var(&new_mask->mask, GFP_KERNEL)) {
375                 kfree(new_mask);
376                 return -ENOMEM;
377         }
378 
379         old_mask = pcrypt->cb_cpumask;
380 
381         cpumask_copy(new_mask->mask, cpumask->cbcpu);
382         rcu_assign_pointer(pcrypt->cb_cpumask, new_mask);
383         synchronize_rcu_bh();
384 
385         free_cpumask_var(old_mask->mask);
386         kfree(old_mask);
387         return 0;
388 }
389 
390 static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
391 {
392         int ret;
393 
394         pinst->kobj.kset = pcrypt_kset;
395         ret = kobject_add(&pinst->kobj, NULL, name);
396         if (!ret)
397                 kobject_uevent(&pinst->kobj, KOBJ_ADD);
398 
399         return ret;
400 }
401 
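     /*
      * Per-direction setup: create the workqueue and padata instance, seed
      * the callback cpumask with the CPUs currently online, then register
      * the cpumask notifier and the sysfs kobject.  All of this runs with
      * CPU hotplug excluded (get_online_cpus()).
      */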
402 static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
403                               const char *name)
404 {
405         int ret = -ENOMEM;
406         struct pcrypt_cpumask *mask;
407 
408         get_online_cpus();
409 
410         pcrypt->wq = alloc_workqueue("%s", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE,
411                                      1, name);
412         if (!pcrypt->wq)
413                 goto err;
414 
415         pcrypt->pinst = padata_alloc_possible(pcrypt->wq);
416         if (!pcrypt->pinst)
417                 goto err_destroy_workqueue;
418 
419         mask = kmalloc(sizeof(*mask), GFP_KERNEL);
420         if (!mask)
421                 goto err_free_padata;
422         if (!alloc_cpumask_var(&mask->mask, GFP_KERNEL)) {
423                 kfree(mask);
424                 goto err_free_padata;
425         }
426 
427         cpumask_and(mask->mask, cpu_possible_mask, cpu_online_mask);
428         rcu_assign_pointer(pcrypt->cb_cpumask, mask);
429 
430         pcrypt->nblock.notifier_call = pcrypt_cpumask_change_notify;
431         ret = padata_register_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
432         if (ret)
433                 goto err_free_cpumask;
434 
435         ret = pcrypt_sysfs_add(pcrypt->pinst, name);
436         if (ret)
437                 goto err_unregister_notifier;
438 
439         put_online_cpus();
440 
441         return ret;
442 
443 err_unregister_notifier:
444         padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
445 err_free_cpumask:
446         free_cpumask_var(mask->mask);
447         kfree(mask);
448 err_free_padata:
449         padata_free(pcrypt->pinst);
450 err_destroy_workqueue:
451         destroy_workqueue(pcrypt->wq);
452 err:
453         put_online_cpus();
454 
455         return ret;
456 }
457 
458 static void pcrypt_fini_padata(struct padata_pcrypt *pcrypt)
459 {
460         free_cpumask_var(pcrypt->cb_cpumask->mask);
461         kfree(pcrypt->cb_cpumask);
462 
463         padata_stop(pcrypt->pinst);
464         padata_unregister_cpumask_notifier(pcrypt->pinst, &pcrypt->nblock);
465         destroy_workqueue(pcrypt->wq);
466         padata_free(pcrypt->pinst);
467 }
468 
469 static struct crypto_template pcrypt_tmpl = {
470         .name = "pcrypt",
471         .create = pcrypt_create,
472         .free = pcrypt_free,
473         .module = THIS_MODULE,
474 };
475 
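     /*
      * Module init: create the /sys/kernel/pcrypt kset, bring up the
      * pencrypt and pdecrypt padata instances, start them and finally
      * register the "pcrypt" template.
      */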
476 static int __init pcrypt_init(void)
477 {
478         int err = -ENOMEM;
479 
480         pcrypt_kset = kset_create_and_add("pcrypt", NULL, kernel_kobj);
481         if (!pcrypt_kset)
482                 goto err;
483 
484         err = pcrypt_init_padata(&pencrypt, "pencrypt");
485         if (err)
486                 goto err_unreg_kset;
487 
488         err = pcrypt_init_padata(&pdecrypt, "pdecrypt");
489         if (err)
490                 goto err_deinit_pencrypt;
491 
492         padata_start(pencrypt.pinst);
493         padata_start(pdecrypt.pinst);
494 
495         return crypto_register_template(&pcrypt_tmpl);
496 
497 err_deinit_pencrypt:
498         pcrypt_fini_padata(&pencrypt);
499 err_unreg_kset:
500         kset_unregister(pcrypt_kset);
501 err:
502         return err;
503 }
504 
505 static void __exit pcrypt_exit(void)
506 {
507         pcrypt_fini_padata(&pencrypt);
508         pcrypt_fini_padata(&pdecrypt);
509 
510         kset_unregister(pcrypt_kset);
511         crypto_unregister_template(&pcrypt_tmpl);
512 }
513 
514 module_init(pcrypt_init);
515 module_exit(pcrypt_exit);
516 
517 MODULE_LICENSE("GPL");
518 MODULE_AUTHOR("Steffen Klassert <steffen.klassert@secunet.com>");
519 MODULE_DESCRIPTION("Parallel crypto wrapper");
520 MODULE_ALIAS_CRYPTO("pcrypt");
521 
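For context, a minimal usage sketch (not part of pcrypt.c): a kernel-side caller typically reaches this wrapper by asking the crypto API for a "pcrypt(...)"-wrapped AEAD by name. Everything below, including the "pcrypt(gcm(aes))" algorithm string, the key/IV/buffer sizes and the pcrypt_demo_* names, is an illustrative assumption rather than something taken from this file; it simply shows the ordinary asynchronous AEAD calling convention that pcrypt_aead_encrypt() above plugs into.

/* Hypothetical example module code, not part of crypto/pcrypt.c. */
#include <crypto/aead.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct pcrypt_demo_result {
        struct completion completion;
        int err;
};

/* Completion callback: remember the status and wake the submitter. */
static void pcrypt_demo_done(struct crypto_async_request *req, int err)
{
        struct pcrypt_demo_result *res = req->data;

        if (err == -EINPROGRESS)        /* request only moved off the backlog */
                return;
        res->err = err;
        complete(&res->completion);
}

static int pcrypt_demo_encrypt(void)
{
        struct crypto_aead *tfm;
        struct aead_request *req;
        struct pcrypt_demo_result res;
        struct scatterlist sg;
        u8 key[32] = {};        /* illustrative AES-256 key */
        u8 iv[12] = {};         /* illustrative GCM nonce */
        u8 buf[128] = {};       /* in-place buffer: 112 bytes data + 16 byte tag */
        int err;

        tfm = crypto_alloc_aead("pcrypt(gcm(aes))", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        err = crypto_aead_setkey(tfm, key, sizeof(key));
        if (err)
                goto out_free_tfm;
        err = crypto_aead_setauthsize(tfm, 16);
        if (err)
                goto out_free_tfm;

        req = aead_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        init_completion(&res.completion);
        sg_init_one(&sg, buf, sizeof(buf));
        aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                  pcrypt_demo_done, &res);
        aead_request_set_crypt(req, &sg, &sg, 112, iv);
        aead_request_set_ad(req, 0);

        err = crypto_aead_encrypt(req);
        if (err == -EINPROGRESS || err == -EBUSY) {
                wait_for_completion(&res.completion);
                err = res.err;
        }

        aead_request_free(req);
out_free_tfm:
        crypto_free_aead(tfm);
        return err;
}

Because pcrypt marks its instances CRYPTO_ALG_ASYNC, callers must be prepared for the -EINPROGRESS/-EBUSY return codes exactly as above.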
