Diffstat (limited to 'include')
 -rw-r--r--  include/crypto/aead.h                      | 172
 -rw-r--r--  include/crypto/algapi.h                    |   3
 -rw-r--r--  include/crypto/chacha20.h                  |  25
 -rw-r--r--  include/crypto/hash.h                      |   5
 -rw-r--r--  include/crypto/internal/aead.h             |  72
 -rw-r--r--  include/crypto/internal/geniv.h            |   9
 -rw-r--r--  include/crypto/internal/skcipher.h         |  15
 -rw-r--r--  include/crypto/poly1305.h                  |  41
 -rw-r--r--  include/crypto/skcipher.h                  | 391
 -rw-r--r--  include/dt-bindings/clock/imx6qdl-clock.h  |   5
 -rw-r--r--  include/linux/crypto.h                     |  54
 11 files changed, 564 insertions(+), 228 deletions(-)
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 7169ad04acc0..077cae1e6b51 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -1,7 +1,7 @@
/*
* AEAD: Authenticated Encryption with Associated Data
*
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -45,16 +45,40 @@
* a breach in the integrity of the message. In essence, that -EBADMSG error
* code is the key bonus an AEAD cipher has over "standard" block chaining
* modes.
+ *
+ * Memory Structure:
+ *
+ * To support the needs of the most prominent user of AEAD ciphers, namely
+ * IPSEC, the AEAD ciphers have a special memory layout the caller must adhere
+ * to.
+ *
+ * The scatter list pointing to the input data must contain:
+ *
+ * * for RFC4106 ciphers, the concatenation of
+ * associated authentication data || IV || plaintext or ciphertext. Note
+ * that the same IV (buffer) is also set with the aead_request_set_crypt
+ * call, and that aead_request_set_ad must be given the combined length of
+ * the AAD and the IV, whereas aead_request_set_crypt only specifies the
+ * size of the input plaintext or ciphertext.
+ *
+ * * for "normal" AEAD ciphers, the concatenation of
+ * associated authentication data || plaintext or ciphertext.
+ *
+ * It is important to note that if multiple scatter gather list entries form
+ * the input data mentioned above, the first entry must not point to a NULL
+ * buffer. If the AAD buffer can ever be NULL, the calling code must take
+ * precautions to ensure that this does not leave the first scatter gather
+ * list entry pointing to a NULL buffer.
*/
+struct crypto_aead;
+
/**
* struct aead_request - AEAD request
* @base: Common attributes for async crypto requests
- * @old: Boolean whether the old or new AEAD API is used
* @assoclen: Length in bytes of associated data for authentication
* @cryptlen: Length of data to be encrypted or decrypted
* @iv: Initialisation vector
- * @assoc: Associated data
* @src: Source data
* @dst: Destination data
* @__ctx: Start of private context data
@@ -62,14 +86,11 @@
struct aead_request {
struct crypto_async_request base;
- bool old;
-
unsigned int assoclen;
unsigned int cryptlen;
u8 *iv;
- struct scatterlist *assoc;
struct scatterlist *src;
struct scatterlist *dst;
@@ -77,19 +98,6 @@ struct aead_request {
};
/**
- * struct aead_givcrypt_request - AEAD request with IV generation
- * @seq: Sequence number for IV generation
- * @giv: Space for generated IV
- * @areq: The AEAD request itself
- */
-struct aead_givcrypt_request {
- u64 seq;
- u8 *giv;
-
- struct aead_request areq;
-};
-
-/**
* struct aead_alg - AEAD cipher definition
* @maxauthsize: Set the maximum authentication tag size supported by the
* transformation. A transformation may support smaller tag sizes.
@@ -141,16 +149,6 @@ struct aead_alg {
};
struct crypto_aead {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- struct crypto_aead *child;
-
unsigned int authsize;
unsigned int reqsize;
@@ -192,16 +190,6 @@ static inline void crypto_free_aead(struct crypto_aead *tfm)
crypto_destroy_tfm(tfm, crypto_aead_tfm(tfm));
}
-static inline struct crypto_aead *crypto_aead_crt(struct crypto_aead *tfm)
-{
- return tfm;
-}
-
-static inline struct old_aead_alg *crypto_old_aead_alg(struct crypto_aead *tfm)
-{
- return &crypto_aead_tfm(tfm)->__crt_alg->cra_aead;
-}
-
static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
{
return container_of(crypto_aead_tfm(tfm)->__crt_alg,
@@ -210,8 +198,7 @@ static inline struct aead_alg *crypto_aead_alg(struct crypto_aead *tfm)
static inline unsigned int crypto_aead_alg_ivsize(struct aead_alg *alg)
{
- return alg->base.cra_aead.encrypt ? alg->base.cra_aead.ivsize :
- alg->ivsize;
+ return alg->ivsize;
}
/**
@@ -337,7 +324,7 @@ static inline struct crypto_aead *crypto_aead_reqtfm(struct aead_request *req)
*/
static inline int crypto_aead_encrypt(struct aead_request *req)
{
- return crypto_aead_reqtfm(req)->encrypt(req);
+ return crypto_aead_alg(crypto_aead_reqtfm(req))->encrypt(req);
}
/**
@@ -364,10 +351,12 @@ static inline int crypto_aead_encrypt(struct aead_request *req)
*/
static inline int crypto_aead_decrypt(struct aead_request *req)
{
- if (req->cryptlen < crypto_aead_authsize(crypto_aead_reqtfm(req)))
+ struct crypto_aead *aead = crypto_aead_reqtfm(req);
+
+ if (req->cryptlen < crypto_aead_authsize(aead))
return -EINVAL;
- return crypto_aead_reqtfm(req)->decrypt(req);
+ return crypto_aead_alg(aead)->decrypt(req);
}
/**
@@ -387,7 +376,10 @@ static inline int crypto_aead_decrypt(struct aead_request *req)
*
* Return: number of bytes
*/
-unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
+static inline unsigned int crypto_aead_reqsize(struct crypto_aead *tfm)
+{
+ return tfm->reqsize;
+}
/**
* aead_request_set_tfm() - update cipher handle reference in request
@@ -400,7 +392,7 @@ unsigned int crypto_aead_reqsize(struct crypto_aead *tfm);
static inline void aead_request_set_tfm(struct aead_request *req,
struct crypto_aead *tfm)
{
- req->base.tfm = crypto_aead_tfm(tfm->child);
+ req->base.tfm = crypto_aead_tfm(tfm);
}
/**
@@ -526,23 +518,6 @@ static inline void aead_request_set_crypt(struct aead_request *req,
}
/**
- * aead_request_set_assoc() - set the associated data scatter / gather list
- * @req: request handle
- * @assoc: associated data scatter / gather list
- * @assoclen: number of bytes to process from @assoc
- *
- * Obsolete, do not use.
- */
-static inline void aead_request_set_assoc(struct aead_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- req->assoc = assoc;
- req->assoclen = assoclen;
- req->old = true;
-}
-
-/**
* aead_request_set_ad - set associated data information
* @req: request handle
* @assoclen: number of bytes in associated data
@@ -554,77 +529,6 @@ static inline void aead_request_set_ad(struct aead_request *req,
unsigned int assoclen)
{
req->assoclen = assoclen;
- req->old = false;
-}
-
-static inline struct crypto_aead *aead_givcrypt_reqtfm(
- struct aead_givcrypt_request *req)
-{
- return crypto_aead_reqtfm(&req->areq);
-}
-
-static inline int crypto_aead_givencrypt(struct aead_givcrypt_request *req)
-{
- return aead_givcrypt_reqtfm(req)->givencrypt(req);
-};
-
-static inline int crypto_aead_givdecrypt(struct aead_givcrypt_request *req)
-{
- return aead_givcrypt_reqtfm(req)->givdecrypt(req);
-};
-
-static inline void aead_givcrypt_set_tfm(struct aead_givcrypt_request *req,
- struct crypto_aead *tfm)
-{
- req->areq.base.tfm = crypto_aead_tfm(tfm);
-}
-
-static inline struct aead_givcrypt_request *aead_givcrypt_alloc(
- struct crypto_aead *tfm, gfp_t gfp)
-{
- struct aead_givcrypt_request *req;
-
- req = kmalloc(sizeof(struct aead_givcrypt_request) +
- crypto_aead_reqsize(tfm), gfp);
-
- if (likely(req))
- aead_givcrypt_set_tfm(req, tfm);
-
- return req;
-}
-
-static inline void aead_givcrypt_free(struct aead_givcrypt_request *req)
-{
- kfree(req);
-}
-
-static inline void aead_givcrypt_set_callback(
- struct aead_givcrypt_request *req, u32 flags,
- crypto_completion_t compl, void *data)
-{
- aead_request_set_callback(&req->areq, flags, compl, data);
-}
-
-static inline void aead_givcrypt_set_crypt(struct aead_givcrypt_request *req,
- struct scatterlist *src,
- struct scatterlist *dst,
- unsigned int nbytes, void *iv)
-{
- aead_request_set_crypt(&req->areq, src, dst, nbytes, iv);
-}
-
-static inline void aead_givcrypt_set_assoc(struct aead_givcrypt_request *req,
- struct scatterlist *assoc,
- unsigned int assoclen)
-{
- aead_request_set_assoc(&req->areq, assoc, assoclen);
-}
-
-static inline void aead_givcrypt_set_giv(struct aead_givcrypt_request *req,
- u8 *giv, u64 seq)
-{
- req->giv = giv;
- req->seq = seq;
}
#endif /* _CRYPTO_AEAD_H */
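To illustrate the single-sglist memory layout documented at the top of the
reworked aead.h, here is a minimal sketch of an RFC4106 (GCM) encryption
call. The algorithm name, buffer preparation and error handling are
illustrative assumptions, not part of the patch; an asynchronous tfm would
return -EINPROGRESS and complete via the callback instead.

/*
 * Sketch: RFC4106 encryption with the new single-sglist layout.
 * Assumes the caller prepared one buffer holding AAD || IV || plaintext
 * with room for the authentication tag appended.
 */
#include <crypto/aead.h>
#include <linux/scatterlist.h>

static int example_aead_encrypt(const u8 *key, unsigned int keylen,
				u8 *buf, unsigned int assoclen,
				unsigned int ivlen, unsigned int ptlen)
{
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("rfc4106(gcm(aes))", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, keylen);
	if (err)
		goto out;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out;
	}

	/* One sglist covering AAD || IV || plaintext (+ room for the tag). */
	sg_init_one(&sg, buf, assoclen + ivlen + ptlen +
			      crypto_aead_authsize(tfm));

	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP, NULL, NULL);
	aead_request_set_ad(req, assoclen + ivlen);	/* AAD and IV */
	aead_request_set_crypt(req, &sg, &sg, ptlen,	/* plaintext only */
			       buf + assoclen);		/* IV pointer */

	err = crypto_aead_encrypt(req);
	aead_request_free(req);
out:
	crypto_free_aead(tfm);
	return err;
}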
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index d4ebf6e9af6a..c9fe145f7dd3 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -18,6 +18,7 @@
#include <linux/skbuff.h>
struct crypto_aead;
+struct crypto_instance;
struct module;
struct rtattr;
struct seq_file;
@@ -30,6 +31,7 @@ struct crypto_type {
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*report)(struct sk_buff *skb, struct crypto_alg *alg);
struct crypto_alg *(*lookup)(const char *name, u32 type, u32 mask);
+ void (*free)(struct crypto_instance *inst);
unsigned int type;
unsigned int maskclear;
@@ -180,7 +182,6 @@ struct crypto_instance *crypto_alloc_instance(const char *name,
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
struct crypto_async_request *request);
-void *__crypto_dequeue_request(struct crypto_queue *queue, unsigned int offset);
struct crypto_async_request *crypto_dequeue_request(struct crypto_queue *queue);
int crypto_tfm_in_queue(struct crypto_queue *queue, struct crypto_tfm *tfm);
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
new file mode 100644
index 000000000000..274bbaeeed0f
--- /dev/null
+++ b/include/crypto/chacha20.h
@@ -0,0 +1,25 @@
+/*
+ * Common values for the ChaCha20 algorithm
+ */
+
+#ifndef _CRYPTO_CHACHA20_H
+#define _CRYPTO_CHACHA20_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define CHACHA20_IV_SIZE 16
+#define CHACHA20_KEY_SIZE 32
+#define CHACHA20_BLOCK_SIZE 64
+
+struct chacha20_ctx {
+ u32 key[8];
+};
+
+void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
+int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+ unsigned int keysize);
+int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
+ struct scatterlist *src, unsigned int nbytes);
+
+#endif
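The new header only declares the shared ChaCha20 primitives; a caller drives
them through the synchronous blkcipher interface that crypto_chacha20_crypt()
is declared against. A minimal sketch, assuming the algorithm is registered
under the name "chacha20":

#include <crypto/chacha20.h>
#include <linux/scatterlist.h>

static int example_chacha20(struct scatterlist *dst, struct scatterlist *src,
			    unsigned int nbytes,
			    const u8 key[CHACHA20_KEY_SIZE],
			    u8 iv[CHACHA20_IV_SIZE])
{
	struct crypto_blkcipher *tfm;
	struct blkcipher_desc desc;
	int err;

	tfm = crypto_alloc_blkcipher("chacha20", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, key, CHACHA20_KEY_SIZE);
	if (!err) {
		desc.tfm = tfm;
		desc.info = iv;		/* 16-byte nonce/counter block */
		desc.flags = 0;
		err = crypto_blkcipher_encrypt_iv(&desc, dst, src, nbytes);
	}

	crypto_free_blkcipher(tfm);
	return err;
}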
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 57c8a6ee33c2..8e920b44c0ac 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -63,6 +63,11 @@ struct ahash_request {
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
+#define AHASH_REQUEST_ON_STACK(name, ahash) \
+ char __##name##_desc[sizeof(struct ahash_request) + \
+ crypto_ahash_reqsize(ahash)] CRYPTO_MINALIGN_ATTR; \
+ struct ahash_request *name = (void *)__##name##_desc
+
/**
* struct ahash_alg - asynchronous message digest definition
* @init: Initialize the transformation context. Intended only to initialize the
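The new AHASH_REQUEST_ON_STACK macro mirrors SHASH_DESC_ON_STACK: it reserves
request memory on the stack instead of via kmalloc. A sketch of a one-shot
digest, assuming the tfm was allocated synchronously (e.g. with mask
CRYPTO_ALG_ASYNC), since an on-stack request must never outlive an
asynchronous completion:

static int example_digest(struct crypto_ahash *tfm, struct scatterlist *sg,
			  unsigned int len, u8 *out)
{
	AHASH_REQUEST_ON_STACK(req, tfm);
	int err;

	/* The macro only sizes the memory; the tfm must still be set. */
	ahash_request_set_tfm(req, tfm);
	ahash_request_set_callback(req, 0, NULL, NULL);
	ahash_request_set_crypt(req, sg, out, len);

	err = crypto_ahash_digest(req);
	return err;
}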
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index 4b2547186519..5554cdd8d6c1 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -1,7 +1,7 @@
/*
* AEAD: Authenticated Encryption with Associated Data
*
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -21,6 +21,7 @@
struct rtattr;
struct aead_instance {
+ void (*free)(struct aead_instance *inst);
union {
struct {
char head[offsetof(struct aead_alg, base)];
@@ -34,20 +35,15 @@ struct crypto_aead_spawn {
struct crypto_spawn base;
};
-extern const struct crypto_type crypto_aead_type;
-extern const struct crypto_type crypto_nivaead_type;
+struct aead_queue {
+ struct crypto_queue base;
+};
static inline void *crypto_aead_ctx(struct crypto_aead *tfm)
{
return crypto_tfm_ctx(&tfm->base);
}
-static inline struct crypto_instance *crypto_aead_alg_instance(
- struct crypto_aead *aead)
-{
- return crypto_tfm_alg_instance(&aead->base);
-}
-
static inline struct crypto_instance *aead_crypto_instance(
struct aead_instance *inst)
{
@@ -61,7 +57,7 @@ static inline struct aead_instance *aead_instance(struct crypto_instance *inst)
static inline struct aead_instance *aead_alg_instance(struct crypto_aead *aead)
{
- return aead_instance(crypto_aead_alg_instance(aead));
+ return aead_instance(crypto_tfm_alg_instance(&aead->base));
}
static inline void *aead_instance_ctx(struct aead_instance *inst)
@@ -90,8 +86,6 @@ static inline void crypto_set_aead_spawn(
crypto_set_spawn(&spawn->base, inst);
}
-struct crypto_alg *crypto_lookup_aead(const char *name, u32 type, u32 mask);
-
int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
u32 type, u32 mask);
@@ -100,12 +94,6 @@ static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
crypto_drop_spawn(&spawn->base);
}
-static inline struct crypto_alg *crypto_aead_spawn_alg(
- struct crypto_aead_spawn *spawn)
-{
- return spawn->base.alg;
-}
-
static inline struct aead_alg *crypto_spawn_aead_alg(
struct crypto_aead_spawn *spawn)
{
@@ -118,43 +106,51 @@ static inline struct crypto_aead *crypto_spawn_aead(
return crypto_spawn_tfm2(&spawn->base);
}
-struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
- struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
-int aead_geniv_init(struct crypto_tfm *tfm);
-void aead_geniv_exit(struct crypto_tfm *tfm);
+static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
+ unsigned int reqsize)
+{
+ aead->reqsize = reqsize;
+}
-static inline struct crypto_aead *aead_geniv_base(struct crypto_aead *geniv)
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
{
- return geniv->child;
+ return alg->maxauthsize;
}
-static inline void *aead_givcrypt_reqctx(struct aead_givcrypt_request *req)
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
{
- return aead_request_ctx(&req->areq);
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
}
-static inline void aead_givcrypt_complete(struct aead_givcrypt_request *req,
- int err)
+static inline void aead_init_queue(struct aead_queue *queue,
+ unsigned int max_qlen)
{
- aead_request_complete(&req->areq, err);
+ crypto_init_queue(&queue->base, max_qlen);
}
-static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
- unsigned int reqsize)
+static inline int aead_enqueue_request(struct aead_queue *queue,
+ struct aead_request *request)
{
- crypto_aead_crt(aead)->reqsize = reqsize;
+ return crypto_enqueue_request(&queue->base, &request->base);
}
-static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+static inline struct aead_request *aead_dequeue_request(
+ struct aead_queue *queue)
{
- return alg->base.cra_aead.encrypt ? alg->base.cra_aead.maxauthsize :
- alg->maxauthsize;
+ struct crypto_async_request *req;
+
+ req = crypto_dequeue_request(&queue->base);
+
+ return req ? container_of(req, struct aead_request, base) : NULL;
}
-static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+static inline struct aead_request *aead_get_backlog(struct aead_queue *queue)
{
- return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+ struct crypto_async_request *req;
+
+ req = crypto_get_backlog(&queue->base);
+
+ return req ? container_of(req, struct aead_request, base) : NULL;
}
int crypto_register_aead(struct aead_alg *alg);
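The aead_queue wrappers added above let a driver keep its backlog logic in
terms of aead_request rather than raw crypto_async_request. A hedged sketch
of how a hypothetical driver might drain such a queue (locking and hardware
submission elided):

static void example_drain(struct aead_queue *queue)
{
	struct aead_request *backlog;
	struct aead_request *req;

	backlog = aead_get_backlog(queue);
	req = aead_dequeue_request(queue);
	if (!req)
		return;

	/* Tell a backlogged requester its request is now being processed. */
	if (backlog)
		backlog->base.complete(&backlog->base, -EINPROGRESS);

	/* ... submit req to hardware here ... */
}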
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 9ca9b871aba5..59333635e712 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -15,10 +15,19 @@
#include <crypto/internal/aead.h>
#include <linux/spinlock.h>
+#include <linux/types.h>
struct aead_geniv_ctx {
spinlock_t lock;
struct crypto_aead *child;
+ struct crypto_blkcipher *null;
+ u8 salt[] __attribute__ ((aligned(__alignof__(u32))));
};
+struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
+ struct rtattr **tb, u32 type, u32 mask);
+void aead_geniv_free(struct aead_instance *inst);
+int aead_init_geniv(struct crypto_aead *tfm);
+void aead_exit_geniv(struct crypto_aead *tfm);
+
#endif /* _CRYPTO_INTERNAL_GENIV_H */
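The relocated aead_geniv_alloc() and the new aead_init_geniv()/
aead_exit_geniv() helpers are meant to be wired into an IV-generator
template's instance. A sketch of a hypothetical template's create function,
modelled on how such templates typically use these helpers:

static int example_geniv_create(struct crypto_template *tmpl,
				struct rtattr **tb)
{
	struct aead_instance *inst;

	inst = aead_geniv_alloc(tmpl, tb, 0, 0);
	if (IS_ERR(inst))
		return PTR_ERR(inst);

	inst->alg.init = aead_init_geniv;	/* sets up ctx->child/null */
	inst->alg.exit = aead_exit_geniv;

	/* The flexible salt[] array needs room after the fixed context. */
	inst->alg.base.cra_ctxsize = sizeof(struct aead_geniv_ctx);
	inst->alg.base.cra_ctxsize += inst->alg.ivsize;

	return aead_register_instance(tmpl, inst);
}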
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index b3a46c515d1b..2cf7a61ece59 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -107,5 +107,20 @@ static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
return req->base.flags;
}
+static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_ctx(&tfm->base);
+}
+
+static inline void *skcipher_request_ctx(struct skcipher_request *req)
+{
+ return req->__ctx;
+}
+
+static inline u32 skcipher_request_flags(struct skcipher_request *req)
+{
+ return req->base.flags;
+}
+
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
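The new accessors give skcipher implementations the same per-tfm and
per-request context hooks the ablkcipher helpers above provide. A sketch of
a hypothetical driver's encrypt handler (the example_reqctx structure is
illustrative, not part of the patch):

struct example_reqctx {
	unsigned int processed;
};

static int example_encrypt(struct skcipher_request *req)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	void *tctx = crypto_skcipher_ctx(tfm);		/* per-tfm state */
	struct example_reqctx *rctx = skcipher_request_ctx(req);

	rctx->processed = 0;
	(void)tctx;
	/* ... walk req->src/req->dst and process req->cryptlen bytes ... */
	return 0;
}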
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
new file mode 100644
index 000000000000..894df59b74e4
--- /dev/null
+++ b/include/crypto/poly1305.h
@@ -0,0 +1,41 @@
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_POLY1305_H
+#define _CRYPTO_POLY1305_H
+
+#include <linux/types.h>
+#include <linux/crypto.h>
+
+#define POLY1305_BLOCK_SIZE 16
+#define POLY1305_KEY_SIZE 32
+#define POLY1305_DIGEST_SIZE 16
+
+struct poly1305_desc_ctx {
+ /* key */
+ u32 r[5];
+ /* finalize key */
+ u32 s[4];
+ /* accumulator */
+ u32 h[5];
+ /* partial buffer */
+ u8 buf[POLY1305_BLOCK_SIZE];
+ /* bytes used in partial buffer */
+ unsigned int buflen;
+ /* r key has been set */
+ bool rset;
+ /* s key has been set */
+ bool sset;
+};
+
+int crypto_poly1305_init(struct shash_desc *desc);
+int crypto_poly1305_setkey(struct crypto_shash *tfm,
+ const u8 *key, unsigned int keylen);
+unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
+ const u8 *src, unsigned int srclen);
+int crypto_poly1305_update(struct shash_desc *desc,
+ const u8 *src, unsigned int srclen);
+int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+
+#endif
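The rset/sset flags in the descriptor context reflect that this shash
variant consumes its 32-byte one-time key from the front of the data stream
(via crypto_poly1305_setdesckey) rather than through a conventional setkey.
A hedged sketch through the generic shash interface, assuming the algorithm
is registered as "poly1305":

#include <crypto/hash.h>
#include <crypto/poly1305.h>

static int example_poly1305(const u8 key[POLY1305_KEY_SIZE],
			    const u8 *msg, unsigned int len,
			    u8 digest[POLY1305_DIGEST_SIZE])
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash("poly1305", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	{
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		desc->flags = 0;

		err = crypto_shash_init(desc);
		if (!err)	/* one-time key travels as leading data */
			err = crypto_shash_update(desc, key,
						  POLY1305_KEY_SIZE);
		if (!err)
			err = crypto_shash_update(desc, msg, len);
		if (!err)
			err = crypto_shash_final(desc, digest);
	}

	crypto_free_shash(tfm);
	return err;
}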
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 07d245f073d1..d8dd41fb034f 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -1,7 +1,7 @@
/*
* Symmetric key ciphers.
*
- * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
+ * Copyright (c) 2007-2015 Herbert Xu <herbert@gondor.apana.org.au>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
@@ -18,6 +18,28 @@
#include <linux/slab.h>
/**
+ * struct skcipher_request - Symmetric key cipher request
+ * @cryptlen: Number of bytes to encrypt or decrypt
+ * @iv: Initialisation Vector
+ * @src: Source SG list
+ * @dst: Destination SG list
+ * @base: Underlying asynchronous crypto request
+ * @__ctx: Start of private context data
+ */
+struct skcipher_request {
+ unsigned int cryptlen;
+
+ u8 *iv;
+
+ struct scatterlist *src;
+ struct scatterlist *dst;
+
+ struct crypto_async_request base;
+
+ void *__ctx[] CRYPTO_MINALIGN_ATTR;
+};
+
+/**
* struct skcipher_givcrypt_request - Crypto request with IV generation
* @seq: Sequence number for IV generation
* @giv: Space for generated IV
@@ -30,6 +52,23 @@ struct skcipher_givcrypt_request {
struct ablkcipher_request creq;
};
+struct crypto_skcipher {
+ int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keylen);
+ int (*encrypt)(struct skcipher_request *req);
+ int (*decrypt)(struct skcipher_request *req);
+
+ unsigned int ivsize;
+ unsigned int reqsize;
+
+ struct crypto_tfm base;
+};
+
+#define SKCIPHER_REQUEST_ON_STACK(name, tfm) \
+ char __##name##_desc[sizeof(struct skcipher_request) + \
+ crypto_skcipher_reqsize(tfm)] CRYPTO_MINALIGN_ATTR; \
+ struct skcipher_request *name = (void *)__##name##_desc
+
static inline struct crypto_ablkcipher *skcipher_givcrypt_reqtfm(
struct skcipher_givcrypt_request *req)
{
@@ -106,5 +145,355 @@ static inline void skcipher_givcrypt_set_giv(
req->seq = seq;
}
+/**
+ * DOC: Symmetric Key Cipher API
+ *
+ * Symmetric key cipher API is used with the ciphers of type
+ * CRYPTO_ALG_TYPE_SKCIPHER (listed as type "skcipher" in /proc/crypto).
+ *
+ * Asynchronous cipher operations imply that the function invocation for a
+ * cipher request returns immediately, before the operation has completed.
+ * The cipher request is scheduled as a separate kernel thread and therefore
+ * load-balanced on the different CPUs via the process scheduler. To allow
+ * the kernel crypto API to inform the caller about the completion of a cipher
+ * request, the caller must provide a callback function. That function is
+ * invoked with the request handle when the request completes.
+ *
+ * To support the asynchronous operation, more information than just the
+ * cipher handle must be supplied to the kernel crypto API. That additional
+ * information is given by filling in the skcipher_request data structure.
+ *
+ * For the symmetric key cipher API, the state is maintained with the tfm
+ * cipher handle. A single tfm can be used across multiple calls and in
+ * parallel. For asynchronous block cipher calls, context data supplied and
+ * only used by the caller can be referenced in the request data structure in
+ * addition to the IV used for the cipher request. Maintaining such state
+ * information is important for a crypto driver implementer, because when the
+ * callback function is invoked upon completion of the cipher operation, it
+ * may need to know which of multiple parallel operations just finished. This
+ * state information is unused by the kernel crypto API.
+ */
+
+static inline struct crypto_skcipher *__crypto_skcipher_cast(
+ struct crypto_tfm *tfm)
+{
+ return container_of(tfm, struct crypto_skcipher, base);
+}
+
+/**
+ * crypto_alloc_skcipher() - allocate symmetric key cipher handle
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * skcipher cipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Allocate a cipher handle for an skcipher. The returned struct
+ * crypto_skcipher is the cipher handle that is required for any subsequent
+ * API invocation for that skcipher.
+ *
+ * Return: allocated cipher handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
+ u32 type, u32 mask);
+
+static inline struct crypto_tfm *crypto_skcipher_tfm(
+ struct crypto_skcipher *tfm)
+{
+ return &tfm->base;
+}
+
+/**
+ * crypto_free_skcipher() - zeroize and free cipher handle
+ * @tfm: cipher handle to be freed
+ */
+static inline void crypto_free_skcipher(struct crypto_skcipher *tfm)
+{
+ crypto_destroy_tfm(tfm, crypto_skcipher_tfm(tfm));
+}
+
+/**
+ * crypto_has_skcipher() - Search for the availability of an skcipher.
+ * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
+ * skcipher
+ * @type: specifies the type of the cipher
+ * @mask: specifies the mask for the cipher
+ *
+ * Return: true when the skcipher is known to the kernel crypto API; false
+ * otherwise
+ */
+static inline int crypto_has_skcipher(const char *alg_name, u32 type,
+ u32 mask)
+{
+ return crypto_has_alg(alg_name, crypto_skcipher_type(type),
+ crypto_skcipher_mask(mask));
+}
+
+/**
+ * crypto_skcipher_ivsize() - obtain IV size
+ * @tfm: cipher handle
+ *
+ * The size of the IV for the skcipher referenced by the cipher handle is
+ * returned. This IV size may be zero if the cipher does not need an IV.
+ *
+ * Return: IV size in bytes
+ */
+static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
+{
+ return tfm->ivsize;
+}
+
+/**
+ * crypto_skcipher_blocksize() - obtain block size of cipher
+ * @tfm: cipher handle
+ *
+ * The block size for the skcipher referenced with the cipher handle is
+ * returned. The caller may use that information to allocate appropriate
+ * memory for the data returned by the encryption or decryption operation
+ *
+ * Return: block size of cipher
+ */
+static inline unsigned int crypto_skcipher_blocksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
+}
+
+static inline unsigned int crypto_skcipher_alignmask(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_alg_alignmask(crypto_skcipher_tfm(tfm));
+}
+
+static inline u32 crypto_skcipher_get_flags(struct crypto_skcipher *tfm)
+{
+ return crypto_tfm_get_flags(crypto_skcipher_tfm(tfm));
+}
+
+static inline void crypto_skcipher_set_flags(struct crypto_skcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_set_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+static inline void crypto_skcipher_clear_flags(struct crypto_skcipher *tfm,
+ u32 flags)
+{
+ crypto_tfm_clear_flags(crypto_skcipher_tfm(tfm), flags);
+}
+
+/**
+ * crypto_skcipher_setkey() - set key for cipher
+ * @tfm: cipher handle
+ * @key: buffer holding the key
+ * @keylen: length of the key in bytes
+ *
+ * The caller provided key is set for the skcipher referenced by the cipher
+ * handle.
+ *
+ * Note, the key length determines the cipher variant. Many block ciphers come
+ * in different variants depending on the key size, such as AES-128 vs. AES-192
+ * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
+ * is performed.
+ *
+ * Return: 0 if the setting of the key was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen)
+{
+ return tfm->setkey(tfm, key, keylen);
+}
+
+/**
+ * crypto_skcipher_reqtfm() - obtain cipher handle from request
+ * @req: skcipher_request out of which the cipher handle is to be obtained
+ *
+ * Return the crypto_skcipher handle when furnishing an skcipher_request
+ * data structure.
+ *
+ * Return: crypto_skcipher handle
+ */
+static inline struct crypto_skcipher *crypto_skcipher_reqtfm(
+ struct skcipher_request *req)
+{
+ return __crypto_skcipher_cast(req->base.tfm);
+}
+
+/**
+ * crypto_skcipher_encrypt() - encrypt plaintext
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Encrypt plaintext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_encrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return tfm->encrypt(req);
+}
+
+/**
+ * crypto_skcipher_decrypt() - decrypt ciphertext
+ * @req: reference to the skcipher_request handle that holds all information
+ * needed to perform the cipher operation
+ *
+ * Decrypt ciphertext data using the skcipher_request handle. That data
+ * structure and how it is filled with data is discussed with the
+ * skcipher_request_* functions.
+ *
+ * Return: 0 if the cipher operation was successful; < 0 if an error occurred
+ */
+static inline int crypto_skcipher_decrypt(struct skcipher_request *req)
+{
+ struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
+
+ return tfm->decrypt(req);
+}
+
+/**
+ * DOC: Symmetric Key Cipher Request Handle
+ *
+ * The skcipher_request data structure contains all pointers to data
+ * required for the symmetric key cipher operation. This includes the cipher
+ * handle (which can be used by multiple skcipher_request instances), pointer
+ * to plaintext and ciphertext, asynchronous callback function, etc. It acts
+ * as a handle to the skcipher_request_* API calls in a similar way as the
+ * skcipher handle does to the crypto_skcipher_* API calls.
+ */
+
+/**
+ * crypto_skcipher_reqsize() - obtain size of the request data structure
+ * @tfm: cipher handle
+ *
+ * Return: number of bytes
+ */
+static inline unsigned int crypto_skcipher_reqsize(struct crypto_skcipher *tfm)
+{
+ return tfm->reqsize;
+}
+
+/**
+ * skcipher_request_set_tfm() - update cipher handle reference in request
+ * @req: request handle to be modified
+ * @tfm: cipher handle that shall be added to the request handle
+ *
+ * Allow the caller to replace the existing skcipher handle in the request
+ * data structure with a different one.
+ */
+static inline void skcipher_request_set_tfm(struct skcipher_request *req,
+ struct crypto_skcipher *tfm)
+{
+ req->base.tfm = crypto_skcipher_tfm(tfm);
+}
+
+static inline struct skcipher_request *skcipher_request_cast(
+ struct crypto_async_request *req)
+{
+ return container_of(req, struct skcipher_request, base);
+}
+
+/**
+ * skcipher_request_alloc() - allocate request data structure
+ * @tfm: cipher handle to be registered with the request
+ * @gfp: memory allocation flag that is handed to kmalloc by the API call.
+ *
+ * Allocate the request data structure that must be used with the skcipher
+ * encrypt and decrypt API calls. During the allocation, the provided skcipher
+ * handle is registered in the request data structure.
+ *
+ * Return: allocated request handle in case of success; IS_ERR() is true in case
+ * of an error, PTR_ERR() returns the error code.
+ */
+static inline struct skcipher_request *skcipher_request_alloc(
+ struct crypto_skcipher *tfm, gfp_t gfp)
+{
+ struct skcipher_request *req;
+
+ req = kmalloc(sizeof(struct skcipher_request) +
+ crypto_skcipher_reqsize(tfm), gfp);
+
+ if (likely(req))
+ skcipher_request_set_tfm(req, tfm);
+
+ return req;
+}
+
+/**
+ * skcipher_request_free() - zeroize and free request data structure
+ * @req: request data structure cipher handle to be freed
+ */
+static inline void skcipher_request_free(struct skcipher_request *req)
+{
+ kzfree(req);
+}
+
+/**
+ * skcipher_request_set_callback() - set asynchronous callback function
+ * @req: request handle
+ * @flags: specify zero or an ORing of the flags
+ * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
+ * increase the wait queue beyond the initial maximum size;
+ * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
+ * @compl: callback function pointer to be registered with the request handle
+ * @data: The data pointer refers to memory that is not used by the kernel
+ * crypto API, but provided to the callback function for it to use. Here,
+ * the caller can provide a reference to memory the callback function can
+ * operate on. As the callback function is invoked asynchronously to the
+ * related functionality, it may need to access data structures of the
+ * related functionality which can be referenced using this pointer. The
+ * callback function can access the memory via the "data" field in the
+ * crypto_async_request data structure provided to the callback function.
+ *
+ * This function allows setting the callback function that is triggered once the
+ * cipher operation completes.
+ *
+ * The callback function is registered with the skcipher_request handle and
+ * must comply with the following template
+ *
+ * void callback_function(struct crypto_async_request *req, int error)
+ */
+static inline void skcipher_request_set_callback(struct skcipher_request *req,
+ u32 flags,
+ crypto_completion_t compl,
+ void *data)
+{
+ req->base.complete = compl;
+ req->base.data = data;
+ req->base.flags = flags;
+}
+
+/**
+ * skcipher_request_set_crypt() - set data buffers
+ * @req: request handle
+ * @src: source scatter / gather list
+ * @dst: destination scatter / gather list
+ * @cryptlen: number of bytes to process from @src
+ * @iv: IV for the cipher operation which must comply with the IV size defined
+ * by crypto_skcipher_ivsize
+ *
+ * This function allows setting of the source data and destination data
+ * scatter / gather lists.
+ *
+ * For encryption, the source is treated as the plaintext and the
+ * destination is the ciphertext. For a decryption operation, the use is
+ * reversed - the source is the ciphertext and the destination is the plaintext.
+ */
+static inline void skcipher_request_set_crypt(
+ struct skcipher_request *req,
+ struct scatterlist *src, struct scatterlist *dst,
+ unsigned int cryptlen, void *iv)
+{
+ req->src = src;
+ req->dst = dst;
+ req->cryptlen = cryptlen;
+ req->iv = iv;
+}
+
#endif /* _CRYPTO_SKCIPHER_H */
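Tying the new skcipher interface together, here is a minimal end-to-end
sketch: allocate a handle, set the key, place the request on the stack, and
encrypt. The "ctr(aes)" name is an illustrative assumption; the
CRYPTO_ALG_ASYNC mask requests a synchronous implementation so the on-stack
request never completes asynchronously.

#include <crypto/skcipher.h>
#include <linux/scatterlist.h>

static int example_skcipher(const u8 *key, unsigned int keylen,
			    struct scatterlist *src, struct scatterlist *dst,
			    unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (!err) {
		SKCIPHER_REQUEST_ON_STACK(req, tfm);

		/* The macro only sizes the memory; still bind the tfm. */
		skcipher_request_set_tfm(req, tfm);
		skcipher_request_set_callback(req, 0, NULL, NULL);
		skcipher_request_set_crypt(req, src, dst, len, iv);

		err = crypto_skcipher_encrypt(req);
	}

	crypto_free_skcipher(tfm);
	return err;
}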
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index 8780868458a0..8de173ff19f3 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -251,6 +251,9 @@
#define IMX6QDL_CLK_VIDEO_27M 238
#define IMX6QDL_CLK_MIPI_CORE_CFG 239
#define IMX6QDL_CLK_MIPI_IPG 240
-#define IMX6QDL_CLK_END 241
+#define IMX6QDL_CLK_CAAM_MEM 241
+#define IMX6QDL_CLK_CAAM_ACLK 242
+#define IMX6QDL_CLK_CAAM_IPG 243
+#define IMX6QDL_CLK_END 244
#endif /* __DT_BINDINGS_CLOCK_IMX6QDL_H */
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 81ef938b0a8e..e71cb70a1ac2 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -102,12 +102,6 @@
#define CRYPTO_ALG_INTERNAL 0x00002000
/*
- * Temporary flag used to prevent legacy AEAD implementations from
- * being used by user-space.
- */
-#define CRYPTO_ALG_AEAD_NEW 0x00004000
-
-/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_REQ_MASK 0x000fff00
@@ -142,13 +136,10 @@
struct scatterlist;
struct crypto_ablkcipher;
struct crypto_async_request;
-struct crypto_aead;
struct crypto_blkcipher;
struct crypto_hash;
struct crypto_tfm;
struct crypto_type;
-struct aead_request;
-struct aead_givcrypt_request;
struct skcipher_givcrypt_request;
typedef void (*crypto_completion_t)(struct crypto_async_request *req, int err);
@@ -275,47 +266,6 @@ struct ablkcipher_alg {
};
/**
- * struct old_aead_alg - AEAD cipher definition
- * @maxauthsize: Set the maximum authentication tag size supported by the
- * transformation. A transformation may support smaller tag sizes.
- * As the authentication tag is a message digest to ensure the
- * integrity of the encrypted data, a consumer typically wants the
- * largest authentication tag possible as defined by this
- * variable.
- * @setauthsize: Set authentication size for the AEAD transformation. This
- * function is used to specify the consumer requested size of the
- * authentication tag to be either generated by the transformation
- * during encryption or the size of the authentication tag to be
- * supplied during the decryption operation. This function is also
- * responsible for checking the authentication tag size for
- * validity.
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @givencrypt: see struct ablkcipher_alg
- * @givdecrypt: see struct ablkcipher_alg
- * @geniv: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @givencrypt , @givdecrypt , @geniv and @ivsize are
- * mandatory and must be filled.
- */
-struct old_aead_alg {
- int (*setkey)(struct crypto_aead *tfm, const u8 *key,
- unsigned int keylen);
- int (*setauthsize)(struct crypto_aead *tfm, unsigned int authsize);
- int (*encrypt)(struct aead_request *req);
- int (*decrypt)(struct aead_request *req);
- int (*givencrypt)(struct aead_givcrypt_request *req);
- int (*givdecrypt)(struct aead_givcrypt_request *req);
-
- const char *geniv;
-
- unsigned int ivsize;
- unsigned int maxauthsize;
-};
-
-/**
* struct blkcipher_alg - synchronous block cipher definition
* @min_keysize: see struct ablkcipher_alg
* @max_keysize: see struct ablkcipher_alg
@@ -409,7 +359,6 @@ struct compress_alg {
#define cra_ablkcipher cra_u.ablkcipher
-#define cra_aead cra_u.aead
#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -460,7 +409,7 @@ struct compress_alg {
* struct crypto_type, which implements callbacks common for all
* transformation types. There are multiple options:
* &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_aead_type, &crypto_rng_type.
+ * &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -508,7 +457,6 @@ struct crypto_alg {
union {
struct ablkcipher_alg ablkcipher;
- struct old_aead_alg aead;
struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;