
Linux/crypto/xts.c

/* XTS: as defined in IEEE1619/D16
 *      http://grouper.ieee.org/groups/1619/email/pdf00086.pdf
 *      (sector sizes which are not a multiple of 16 bytes are,
 *      however, currently unsupported)
 *
 * Copyright (c) 2007 Rik Snel <rsnel@cube.dyndns.org>
 *
 * Based on ecb.c
 * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
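For orientation, here is a minimal sketch (not part of xts.c) of how an in-kernel caller might drive the "xts(aes)" instance this template provides. The helper name is hypothetical, error handling is trimmed, and the DECLARE_CRYPTO_WAIT/crypto_req_done/crypto_wait_req helpers assumed here come from kernels newer than this file:

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

/* Hypothetical helper: encrypt one 512-byte sector in place with xts(aes). */
static int xts_demo_encrypt_sector(const u8 *key, unsigned int keylen,
                                   u8 *sector, u8 iv[16])
{
        struct crypto_skcipher *tfm;
        struct skcipher_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_skcipher("xts(aes)", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        /* XTS keys are double length: Key1 || Key2, e.g. keylen == 64. */
        err = crypto_skcipher_setkey(tfm, key, keylen);
        if (err)
                goto out_free_tfm;

        req = skcipher_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                err = -ENOMEM;
                goto out_free_tfm;
        }

        sg_init_one(&sg, sector, 512);
        skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
                                           CRYPTO_TFM_REQ_MAY_SLEEP,
                                      crypto_req_done, &wait);
        skcipher_request_set_crypt(req, &sg, &sg, 512, iv);

        /* Wait for completion even if the instance is asynchronous. */
        err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

        skcipher_request_free(req);
out_free_tfm:
        crypto_free_skcipher(tfm);
        return err;
}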
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <crypto/xts.h>
#include <crypto/b128ops.h>
#include <crypto/gf128mul.h>

#define XTS_BUFFER_SIZE 128u
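/* XTS_BUFFER_SIZE / sizeof(be128) = 8 tweak blocks fit in the per-request
 * buffer below; init_crypt() switches to a kmalloc'd buffer of up to one
 * page for larger requests. */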

/* tfm context: the "ecb(<cipher>)" child handles the data blocks, the raw
 * single-block cipher turns the IV into the initial tweak */
struct priv {
        struct crypto_skcipher *child;
        struct crypto_cipher *tweak;
};

struct xts_instance_ctx {
        struct crypto_skcipher_spawn spawn;
        char name[CRYPTO_MAX_ALG_NAME];
};

/* per-request context */
struct rctx {
        /* tweak values buffered for the chunk currently being processed */
        be128 buf[XTS_BUFFER_SIZE / sizeof(be128)];

        /* current tweak value T */
        be128 t;

        /* optional kmalloc'd tweak buffer for requests larger than buf */
        be128 *ext;

        struct scatterlist srcbuf[2];
        struct scatterlist dstbuf[2];
        struct scatterlist *src;
        struct scatterlist *dst;

        /* bytes of the original request still to be processed */
        unsigned int left;

        struct skcipher_request subreq;
};

/* Split the double-length key: Key1 (first half) keys the data cipher,
 * Key2 (second half) keys the tweak cipher. */
static int setkey(struct crypto_skcipher *parent, const u8 *key,
                  unsigned int keylen)
{
        struct priv *ctx = crypto_skcipher_ctx(parent);
        struct crypto_skcipher *child;
        struct crypto_cipher *tweak;
        int err;

        err = xts_verify_key(parent, key, keylen);
        if (err)
                return err;

        keylen /= 2;

        /* we need two cipher instances: one to compute the initial 'tweak'
         * by encrypting the IV (usually the 'plain' iv) and the other
         * one to encrypt and decrypt the data */

        /* tweak cipher, uses Key2 i.e. the second half of *key */
        tweak = ctx->tweak;
        crypto_cipher_clear_flags(tweak, CRYPTO_TFM_REQ_MASK);
        crypto_cipher_set_flags(tweak, crypto_skcipher_get_flags(parent) &
                                       CRYPTO_TFM_REQ_MASK);
        err = crypto_cipher_setkey(tweak, key + keylen, keylen);
        crypto_skcipher_set_flags(parent, crypto_cipher_get_flags(tweak) &
                                          CRYPTO_TFM_RES_MASK);
        if (err)
                return err;

        /* data cipher, uses Key1 i.e. the first half of *key */
        child = ctx->child;
        crypto_skcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
        crypto_skcipher_set_flags(child, crypto_skcipher_get_flags(parent) &
                                         CRYPTO_TFM_REQ_MASK);
        err = crypto_skcipher_setkey(child, key, keylen);
        crypto_skcipher_set_flags(parent, crypto_skcipher_get_flags(child) &
                                          CRYPTO_TFM_RES_MASK);

        return err;
}

/* Second half of the xor-encrypt-xor: XOR the buffered tweaks back into the
 * ciphertext produced by the child request, then, if data remains, advance
 * rctx->dst to where the next chunk starts in the scatterlist. */
static int post_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        be128 *buf = rctx->ext ?: rctx->buf;
        struct skcipher_request *subreq;
        const int bs = XTS_BLOCK_SIZE;
        struct skcipher_walk w;
        struct scatterlist *sg;
        unsigned offset;
        int err;

        subreq = &rctx->subreq;
        err = skcipher_walk_virt(&w, subreq, false);

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                be128 *wdst;

                wdst = w.dst.virt.addr;

                do {
                        be128_xor(wdst, buf++, wdst);
                        wdst++;
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        rctx->left -= subreq->cryptlen;

        if (err || !rctx->left)
                goto out;

        rctx->dst = rctx->dstbuf;

        scatterwalk_done(&w.out, 0, 1);
        sg = w.out.sg;
        offset = w.out.offset;

        if (rctx->dst != sg) {
                rctx->dst[0] = *sg;
                sg_unmark_end(rctx->dst);
                scatterwalk_crypto_chain(rctx->dst, sg_next(sg), 0, 2);
        }
        rctx->dst[0].length -= offset - sg->offset;
        rctx->dst[0].offset = offset;

out:
        return err;
}

/* First half of the xor-encrypt-xor: save the tweak for each block in buf[],
 * XOR it into the plaintext, and advance T by multiplying with the primitive
 * element alpha (gf128mul_x_ble).  The child request is then pointed at
 * rctx->dst so the encryption happens in place; if more data remains,
 * rctx->src is advanced to the next chunk. */
static int pre_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        be128 *buf = rctx->ext ?: rctx->buf;
        struct skcipher_request *subreq;
        const int bs = XTS_BLOCK_SIZE;
        struct skcipher_walk w;
        struct scatterlist *sg;
        unsigned cryptlen;
        unsigned offset;
        bool more;
        int err;

        subreq = &rctx->subreq;
        cryptlen = subreq->cryptlen;

        more = rctx->left > cryptlen;
        if (!more)
                cryptlen = rctx->left;

        skcipher_request_set_crypt(subreq, rctx->src, rctx->dst,
                                   cryptlen, NULL);

        err = skcipher_walk_virt(&w, subreq, false);

        while (w.nbytes) {
                unsigned int avail = w.nbytes;
                be128 *wsrc;
                be128 *wdst;

                wsrc = w.src.virt.addr;
                wdst = w.dst.virt.addr;

                do {
                        *buf++ = rctx->t;
                        be128_xor(wdst++, &rctx->t, wsrc++);
                        gf128mul_x_ble(&rctx->t, &rctx->t);
                } while ((avail -= bs) >= bs);

                err = skcipher_walk_done(&w, avail);
        }

        skcipher_request_set_crypt(subreq, rctx->dst, rctx->dst,
                                   cryptlen, NULL);

        if (err || !more)
                goto out;

        rctx->src = rctx->srcbuf;

        scatterwalk_done(&w.in, 0, 1);
        sg = w.in.sg;
        offset = w.in.offset;

        if (rctx->src != sg) {
                rctx->src[0] = *sg;
                sg_unmark_end(rctx->src);
                scatterwalk_crypto_chain(rctx->src, sg_next(sg), 0, 2);
        }
        rctx->src[0].length -= offset - sg->offset;
        rctx->src[0].offset = offset;

out:
        return err;
}
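The tweak update in pre_crypt(), gf128mul_x_ble(), multiplies T by the primitive element alpha of GF(2^128) in the little-endian block convention used by XTS. A byte-oriented sketch of the same operation, for illustration only (the kernel helper works on two 64-bit words instead):

/* Multiply a 16-byte XTS tweak by alpha = x in GF(2^128), little-endian
 * block order, reducing modulo x^128 + x^7 + x^2 + x + 1 (0x87). */
static void xts_mul_alpha(unsigned char t[16])
{
        unsigned char carry = 0;
        int i;

        for (i = 0; i < 16; i++) {
                unsigned char msb = t[i] >> 7;

                t[i] = (t[i] << 1) | carry;
                carry = msb;
        }
        if (carry)
                t[0] ^= 0x87;
}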

/* Prepare a request: point the subrequest at the child cipher, size the
 * tweak buffer (at most one page per chunk), and compute the initial tweak
 * T by encrypting the IV with the tweak cipher. */
static int init_crypt(struct skcipher_request *req, crypto_completion_t done)
{
        struct priv *ctx = crypto_skcipher_ctx(crypto_skcipher_reqtfm(req));
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;
        gfp_t gfp;

        subreq = &rctx->subreq;
        skcipher_request_set_tfm(subreq, ctx->child);
        skcipher_request_set_callback(subreq, req->base.flags, done, req);

        gfp = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL :
                                                           GFP_ATOMIC;
        rctx->ext = NULL;

        subreq->cryptlen = XTS_BUFFER_SIZE;
        if (req->cryptlen > XTS_BUFFER_SIZE) {
                subreq->cryptlen = min(req->cryptlen, (unsigned)PAGE_SIZE);
                rctx->ext = kmalloc(subreq->cryptlen, gfp);
                if (!rctx->ext)
                        return -ENOMEM;
        }

        rctx->src = req->src;
        rctx->dst = req->dst;
        rctx->left = req->cryptlen;

        /* calculate first value of T */
        crypto_cipher_encrypt_one(ctx->tweak, (u8 *)&rctx->t, req->iv);

        return 0;
}

static void exit_crypt(struct skcipher_request *req)
{
        struct rctx *rctx = skcipher_request_ctx(req);

        rctx->left = 0;

        if (rctx->ext)
                kzfree(rctx->ext);
}

/* Drive the request one chunk at a time: pre_crypt() applies the tweaks,
 * the "ecb" child cipher encrypts the chunk, post_crypt() applies the
 * tweaks again.  Asynchronous child completions re-enter via
 * encrypt_done(). */
static int do_encrypt(struct skcipher_request *req, int err)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;

        subreq = &rctx->subreq;

        while (!err && rctx->left) {
                err = pre_crypt(req) ?:
                      crypto_skcipher_encrypt(subreq) ?:
                      post_crypt(req);

                if (err == -EINPROGRESS ||
                    (err == -EBUSY &&
                     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return err;
        }

        exit_crypt(req);
        return err;
}

static void encrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        struct skcipher_request *subreq;
        struct rctx *rctx;

        rctx = skcipher_request_ctx(req);
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = do_encrypt(req, err ?: post_crypt(req));
        if (rctx->left)
                return;

        skcipher_request_complete(req, err);
}

static int encrypt(struct skcipher_request *req)
{
        return do_encrypt(req, init_crypt(req, encrypt_done));
}

/* Mirror image of do_encrypt() for the decrypt path. */
static int do_decrypt(struct skcipher_request *req, int err)
{
        struct rctx *rctx = skcipher_request_ctx(req);
        struct skcipher_request *subreq;

        subreq = &rctx->subreq;

        while (!err && rctx->left) {
                err = pre_crypt(req) ?:
                      crypto_skcipher_decrypt(subreq) ?:
                      post_crypt(req);

                if (err == -EINPROGRESS ||
                    (err == -EBUSY &&
                     req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
                        return err;
        }

        exit_crypt(req);
        return err;
}

static void decrypt_done(struct crypto_async_request *areq, int err)
{
        struct skcipher_request *req = areq->data;
        struct skcipher_request *subreq;
        struct rctx *rctx;

        rctx = skcipher_request_ctx(req);
        subreq = &rctx->subreq;
        subreq->base.flags &= CRYPTO_TFM_REQ_MAY_BACKLOG;

        err = do_decrypt(req, err ?: post_crypt(req));
        if (rctx->left)
                return;

        skcipher_request_complete(req, err);
}

static int decrypt(struct skcipher_request *req)
{
        return do_decrypt(req, init_crypt(req, decrypt_done));
}

/* Legacy helper for blkcipher-based drivers: the caller supplies a tweak
 * buffer plus its own single-block tweak-encryption and bulk data-crypt
 * callbacks through struct xts_crypt_req. */
int xts_crypt(struct blkcipher_desc *desc, struct scatterlist *sdst,
              struct scatterlist *ssrc, unsigned int nbytes,
              struct xts_crypt_req *req)
{
        const unsigned int bsize = XTS_BLOCK_SIZE;
        const unsigned int max_blks = req->tbuflen / bsize;
        struct blkcipher_walk walk;
        unsigned int nblocks;
        be128 *src, *dst, *t;
        be128 *t_buf = req->tbuf;
        int err, i;

        BUG_ON(max_blks < 1);

        blkcipher_walk_init(&walk, sdst, ssrc, nbytes);

        err = blkcipher_walk_virt(desc, &walk);
        nbytes = walk.nbytes;
        if (!nbytes)
                return err;

        nblocks = min(nbytes / bsize, max_blks);
        src = (be128 *)walk.src.virt.addr;
        dst = (be128 *)walk.dst.virt.addr;

        /* calculate first value of T */
        req->tweak_fn(req->tweak_ctx, (u8 *)&t_buf[0], walk.iv);

        i = 0;
        goto first;

        for (;;) {
                do {
                        for (i = 0; i < nblocks; i++) {
                                gf128mul_x_ble(&t_buf[i], t);
first:
                                t = &t_buf[i];

                                /* PP <- T xor P */
                                be128_xor(dst + i, t, src + i);
                        }

                        /* CC <- E(Key1,PP) */
                        req->crypt_fn(req->crypt_ctx, (u8 *)dst,
                                      nblocks * bsize);

                        /* C <- T xor CC */
                        for (i = 0; i < nblocks; i++)
                                be128_xor(dst + i, dst + i, &t_buf[i]);

                        src += nblocks;
                        dst += nblocks;
                        nbytes -= nblocks * bsize;
                        nblocks = min(nbytes / bsize, max_blks);
                } while (nblocks > 0);

                *(be128 *)walk.iv = *t;

                err = blkcipher_walk_done(desc, &walk, nbytes);
                nbytes = walk.nbytes;
                if (!nbytes)
                        break;

                nblocks = min(nbytes / bsize, max_blks);
                src = (be128 *)walk.src.virt.addr;
                dst = (be128 *)walk.dst.virt.addr;
        }

        return err;
}
EXPORT_SYMBOL_GPL(xts_crypt);
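A rough sketch of how a blkcipher-based driver might call xts_crypt(). The struct xts_crypt_req fields match their use above; the driver context type, buffer size and callback names are hypothetical:

/* Hypothetical driver-side wrapper around xts_crypt(). */
static int demo_xts_encrypt(struct blkcipher_desc *desc,
                            struct scatterlist *dst, struct scatterlist *src,
                            unsigned int nbytes)
{
        struct demo_xts_ctx *ctx = crypto_blkcipher_ctx(desc->tfm);
        be128 buf[8];                           /* room for 8 buffered tweaks */
        struct xts_crypt_req req = {
                .tbuf      = buf,
                .tbuflen   = sizeof(buf),
                .tweak_ctx = &ctx->tweak_ctx,   /* key schedule for Key2 */
                .tweak_fn  = demo_encrypt_one,  /* single-block encrypt */
                .crypt_ctx = &ctx->crypt_ctx,   /* key schedule for Key1 */
                .crypt_fn  = demo_ecb_encrypt,  /* bulk in-place encrypt */
        };

        return xts_crypt(desc, dst, src, nbytes, &req);
}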

static int init_tfm(struct crypto_skcipher *tfm)
{
        struct skcipher_instance *inst = skcipher_alg_instance(tfm);
        struct xts_instance_ctx *ictx = skcipher_instance_ctx(inst);
        struct priv *ctx = crypto_skcipher_ctx(tfm);
        struct crypto_skcipher *child;
        struct crypto_cipher *tweak;

        child = crypto_spawn_skcipher(&ictx->spawn);
        if (IS_ERR(child))
                return PTR_ERR(child);

        ctx->child = child;

        tweak = crypto_alloc_cipher(ictx->name, 0, 0);
        if (IS_ERR(tweak)) {
                crypto_free_skcipher(ctx->child);
                return PTR_ERR(tweak);
        }

        ctx->tweak = tweak;

        crypto_skcipher_set_reqsize(tfm, crypto_skcipher_reqsize(child) +
                                         sizeof(struct rctx));

        return 0;
}

static void exit_tfm(struct crypto_skcipher *tfm)
{
        struct priv *ctx = crypto_skcipher_ctx(tfm);

        crypto_free_skcipher(ctx->child);
        crypto_free_cipher(ctx->tweak);
}

static void free(struct skcipher_instance *inst)
{
        crypto_drop_skcipher(skcipher_instance_ctx(inst));
        kfree(inst);
}

/* Template constructor: "xts(foo)" is built from an "ecb(foo)" child
 * skcipher for the data and a plain "foo" cipher for the tweak. */
static int create(struct crypto_template *tmpl, struct rtattr **tb)
{
        struct skcipher_instance *inst;
        struct crypto_attr_type *algt;
        struct xts_instance_ctx *ctx;
        struct skcipher_alg *alg;
        const char *cipher_name;
        int err;

        algt = crypto_get_attr_type(tb);
        if (IS_ERR(algt))
                return PTR_ERR(algt);

        if ((algt->type ^ CRYPTO_ALG_TYPE_SKCIPHER) & algt->mask)
                return -EINVAL;

        cipher_name = crypto_attr_alg_name(tb[1]);
        if (IS_ERR(cipher_name))
                return PTR_ERR(cipher_name);

        inst = kzalloc(sizeof(*inst) + sizeof(*ctx), GFP_KERNEL);
        if (!inst)
                return -ENOMEM;

        ctx = skcipher_instance_ctx(inst);

        crypto_set_skcipher_spawn(&ctx->spawn, skcipher_crypto_instance(inst));
        err = crypto_grab_skcipher(&ctx->spawn, cipher_name, 0,
                                   crypto_requires_sync(algt->type,
                                                        algt->mask));
        if (err == -ENOENT) {
                err = -ENAMETOOLONG;
                if (snprintf(ctx->name, CRYPTO_MAX_ALG_NAME, "ecb(%s)",
                             cipher_name) >= CRYPTO_MAX_ALG_NAME)
                        goto err_free_inst;

                err = crypto_grab_skcipher(&ctx->spawn, ctx->name, 0,
                                           crypto_requires_sync(algt->type,
                                                                algt->mask));
        }

        if (err)
                goto err_free_inst;

        alg = crypto_skcipher_spawn_alg(&ctx->spawn);

        err = -EINVAL;
        if (alg->base.cra_blocksize != XTS_BLOCK_SIZE)
                goto err_drop_spawn;

        if (crypto_skcipher_alg_ivsize(alg))
                goto err_drop_spawn;

        err = crypto_inst_setname(skcipher_crypto_instance(inst), "xts",
                                  &alg->base);
        if (err)
                goto err_drop_spawn;

        err = -EINVAL;
        cipher_name = alg->base.cra_name;

        /* Alas we screwed up the naming so we have to mangle the
         * cipher name.
         */
        if (!strncmp(cipher_name, "ecb(", 4)) {
                unsigned len;

                len = strlcpy(ctx->name, cipher_name + 4, sizeof(ctx->name));
                if (len < 2 || len >= sizeof(ctx->name))
                        goto err_drop_spawn;

                if (ctx->name[len - 1] != ')')
                        goto err_drop_spawn;

                ctx->name[len - 1] = 0;

                if (snprintf(inst->alg.base.cra_name, CRYPTO_MAX_ALG_NAME,
                             "xts(%s)", ctx->name) >= CRYPTO_MAX_ALG_NAME) {
                        err = -ENAMETOOLONG;
                        goto err_drop_spawn;
                }
        } else
                goto err_drop_spawn;

        inst->alg.base.cra_flags = alg->base.cra_flags & CRYPTO_ALG_ASYNC;
        inst->alg.base.cra_priority = alg->base.cra_priority;
        inst->alg.base.cra_blocksize = XTS_BLOCK_SIZE;
        inst->alg.base.cra_alignmask = alg->base.cra_alignmask |
                                       (__alignof__(u64) - 1);

        inst->alg.ivsize = XTS_BLOCK_SIZE;
        inst->alg.min_keysize = crypto_skcipher_alg_min_keysize(alg) * 2;
        inst->alg.max_keysize = crypto_skcipher_alg_max_keysize(alg) * 2;

        inst->alg.base.cra_ctxsize = sizeof(struct priv);

        inst->alg.init = init_tfm;
        inst->alg.exit = exit_tfm;

        inst->alg.setkey = setkey;
        inst->alg.encrypt = encrypt;
        inst->alg.decrypt = decrypt;

        inst->free = free;

        err = skcipher_register_instance(tmpl, inst);
        if (err)
                goto err_drop_spawn;

out:
        return err;

err_drop_spawn:
        crypto_drop_skcipher(&ctx->spawn);
err_free_inst:
        kfree(inst);
        goto out;
}

static struct crypto_template crypto_tmpl = {
        .name = "xts",
        .create = create,
        .module = THIS_MODULE,
};

static int __init crypto_module_init(void)
{
        return crypto_register_template(&crypto_tmpl);
}

static void __exit crypto_module_exit(void)
{
        crypto_unregister_template(&crypto_tmpl);
}

module_init(crypto_module_init);
module_exit(crypto_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("XTS block cipher mode");
MODULE_ALIAS_CRYPTO("xts");
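From user space, the same "xts(aes)" instance can be reached through the AF_ALG socket interface. A rough sketch with error handling trimmed and a 512-bit key assumed; the data itself then flows through the accepted fd via sendmsg() with ALG_SET_OP/ALG_SET_IV control messages and read():

#include <linux/if_alg.h>
#include <sys/socket.h>
#include <unistd.h>

/* Hypothetical helper: returns an operation fd for xts(aes), or -1. */
static int open_xts_aes(const unsigned char key[64])
{
        struct sockaddr_alg sa = {
                .salg_family = AF_ALG,
                .salg_type   = "skcipher",
                .salg_name   = "xts(aes)",
        };
        int tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);

        if (tfmfd < 0)
                return -1;

        if (bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa)) < 0 ||
            setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, 64) < 0) {
                close(tfmfd);
                return -1;
        }

        return accept(tfmfd, NULL, NULL);
}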
