/*
 * echainiv: Encrypted Chain IV Generator
 *
 * This generator generates an IV based on a sequence number by xoring it
 * with a salt and then encrypting it with the same key as used to encrypt
 * the plain text.  This algorithm requires that the block size be equal
 * to the IV size.  It is mainly useful for CBC.
 *
 * This generator can only be used by algorithms where authentication
 * is performed after encryption (i.e. authenc).
 *
 * Copyright (c) 2015 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
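
/*
 * Typical usage (illustrative): the template wraps an authenc() AEAD,
 * e.g. "echainiv(authenc(hmac(sha256),cbc(aes)))", the kind of
 * construction used by IPsec ESP with CBC mode ciphers.
 */
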
#include <crypto/internal/geniv.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#define MAX_IV_SIZE 16
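
/*
 * Per-CPU buffer holding the chained IV state: echainiv_read_iv() loads
 * it as the IV for the next inner request and echainiv_encrypt_complete2()
 * writes the updated IV back via echainiv_write_iv().
 */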
static DEFINE_PER_CPU(u32 [MAX_IV_SIZE / sizeof(u32)], echainiv_iv);
/* We don't care if we get preempted and read/write IVs from the next CPU. */
static void echainiv_read_iv(u8 *dst, unsigned size)
{
	u32 *a = (u32 *)dst;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		*a++ = this_cpu_read(*b);
		b++;
	}
}
static void echainiv_write_iv(const u8 *src, unsigned size)
{
	const u32 *a = (const u32 *)src;
	u32 __percpu *b = echainiv_iv;

	for (; size >= 4; size -= 4) {
		this_cpu_write(*b, *a);
		a++;
		b++;
	}
}
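
/*
 * Second half of the encryption completion handler: on success, chain the
 * newly generated IV into the per-CPU state and copy it back to the
 * caller's IV buffer; in all cases free the aligned bounce buffer if one
 * was allocated by echainiv_encrypt().
 */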
static void echainiv_encrypt_complete2(struct aead_request *req, int err)
{
	struct aead_request *subreq = aead_request_ctx(req);
	struct crypto_aead *geniv;
	unsigned int ivsize;

	if (err == -EINPROGRESS)
		return;

	if (err)
		goto out;

	geniv = crypto_aead_reqtfm(req);
	ivsize = crypto_aead_ivsize(geniv);

	echainiv_write_iv(subreq->iv, ivsize);

	if (req->iv != subreq->iv)
		memcpy(req->iv, subreq->iv, ivsize);

out:
	if (req->iv != subreq->iv)
		kfree(subreq->iv);
}
static void echainiv_encrypt_complete(struct crypto_async_request *base,
				      int err)
{
	struct aead_request *req = base->data;

	echainiv_encrypt_complete2(req, err);
	aead_request_complete(req, err);
}
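
/*
 * Generate the IV and encrypt.  The caller supplies a sequence number as
 * the IV; it is xored with the salt, stored in the destination buffer
 * directly after the AD and then encrypted in place together with the
 * plaintext, using the chained per-CPU IV as the IV of the inner request.
 * The resulting IV is saved by the completion handler.
 */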
static int echainiv_encrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	u8 *info;
	unsigned int ivsize = crypto_aead_ivsize(geniv);
	int err;

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = echainiv_encrypt_complete;
	data = req;
	info = req->iv;

	if (req->src != req->dst) {
		struct blkcipher_desc desc = {
			.tfm = ctx->null,
		};

		err = crypto_blkcipher_encrypt(
			&desc, req->dst, req->src,
			req->assoclen + req->cryptlen);
		if (err)
			return err;
	}

	if (unlikely(!IS_ALIGNED((unsigned long)info,
				 crypto_aead_alignmask(geniv) + 1))) {
		info = kmalloc(ivsize, req->base.flags &
				       CRYPTO_TFM_REQ_MAY_SLEEP ? GFP_KERNEL:
								  GFP_ATOMIC);
		if (!info)
			return -ENOMEM;

		memcpy(info, req->iv, ivsize);
	}

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->dst, req->dst,
			       req->cryptlen, info);
	aead_request_set_ad(subreq, req->assoclen);

	crypto_xor(info, ctx->salt, ivsize);
	scatterwalk_map_and_copy(info, req->dst, req->assoclen, ivsize, 1);
	echainiv_read_iv(info, ivsize);

	err = crypto_aead_encrypt(subreq);
	echainiv_encrypt_complete2(req, err);
	return err;
}
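
/*
 * Decrypt: the transmitted IV is the first ivsize bytes following the AD
 * in the source buffer.  Copy it into req->iv, treat it as extra AD for
 * the inner algorithm and decrypt the remaining ciphertext.
 */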
static int echainiv_decrypt(struct aead_request *req)
{
	struct crypto_aead *geniv = crypto_aead_reqtfm(req);
	struct aead_geniv_ctx *ctx = crypto_aead_ctx(geniv);
	struct aead_request *subreq = aead_request_ctx(req);
	crypto_completion_t compl;
	void *data;
	unsigned int ivsize = crypto_aead_ivsize(geniv);

	if (req->cryptlen < ivsize)
		return -EINVAL;

	aead_request_set_tfm(subreq, ctx->child);

	compl = req->base.complete;
	data = req->base.data;

	aead_request_set_callback(subreq, req->base.flags, compl, data);
	aead_request_set_crypt(subreq, req->src, req->dst,
			       req->cryptlen - ivsize, req->iv);
	aead_request_set_ad(subreq, req->assoclen + ivsize);

	scatterwalk_map_and_copy(req->iv, req->src, req->assoclen, ivsize, 0);

	return crypto_aead_decrypt(subreq);
}
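
/*
 * Instantiate the template: wrap the underlying AEAD with the generic
 * geniv helpers and plug in the echainiv encrypt/decrypt entry points.
 * The IV size must be a multiple of 32 bits and at most MAX_IV_SIZE.
 */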
static int echainiv_aead_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;
	struct crypto_aead_spawn *spawn;
	struct aead_alg *alg;
	int err;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	spawn = aead_instance_ctx(inst);
	alg = crypto_spawn_aead_alg(spawn);

	err = -EINVAL;
	if (inst->alg.ivsize & (sizeof(u32) - 1) ||
	    inst->alg.ivsize > MAX_IV_SIZE)
		goto free_inst;

	inst->alg.encrypt = echainiv_encrypt;
	inst->alg.decrypt = echainiv_decrypt;

	inst->alg.init = aead_init_geniv;
	inst->alg.exit = aead_exit_geniv;

	inst->alg.base.cra_alignmask |= __alignof__(u32) - 1;
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	inst->free = aead_geniv_free;

	err = aead_register_instance(tmpl, inst);
	if (err)
		goto free_inst;

	return 0;

free_inst:
	aead_geniv_free(inst);
	return err;
}
static void echainiv_free(struct crypto_instance *inst)
{
	aead_geniv_free(aead_instance(inst));
}
static struct crypto_template echainiv_tmpl = {
	.name = "echainiv",
	.create = echainiv_aead_create,
	.free = echainiv_free,
	.module = THIS_MODULE,
};
static int __init echainiv_module_init(void)
{
	return crypto_register_template(&echainiv_tmpl);
}

static void __exit echainiv_module_exit(void)
{
	crypto_unregister_template(&echainiv_tmpl);
}
module_init(echainiv_module_init);
module_exit(echainiv_module_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Encrypted Chain IV Generator");
MODULE_ALIAS_CRYPTO("echainiv");