author     Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 09:54:19 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-02-23 09:54:19 -0800
commit     5bcbe22ca47da04cda3a858cef67f55b550c1d13 (patch)
tree       49bd61e32eb2d652085a49182436322a3e0e9840 /include
parent     1db934a5b77a9e37c4742c704fde6af233187a98 (diff)
parent     12cb3a1c4184f891d965d1f39f8cfcc9ef617647 (diff)
download   linux-5bcbe22ca47da04cda3a858cef67f55b550c1d13.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto update from Herbert Xu:
"API:
- Try to catch hash output overrun in testmgr
- Introduce walksize attribute for batched walking
- Make crypto_xor() and crypto_inc() alignment agnostic
Algorithms:
- Add time-invariant AES algorithm
- Add standalone CBCMAC algorithm
Drivers:
- Add NEON accelerated chacha20 on ARM/ARM64
- Expose AES-CTR as synchronous skcipher on ARM64
- Add scalar AES implementation on ARM64
- Improve scalar AES implementation on ARM
- Improve NEON AES implementation on ARM/ARM64
- Merge CRC32 and PMULL instruction based drivers on ARM64
- Add NEON accelerated CBCMAC/CMAC/XCBC AES on ARM64
- Add IPsec AUTHENC implementation in atmel
- Add support for Octeon-tx CPT Engine
- Add Broadcom SPU driver
- Add MediaTek driver"
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (142 commits)
crypto: xts - Add ECB dependency
crypto: cavium - switch to pci_alloc_irq_vectors
crypto: cavium - remove dead MSI-X related define
crypto: brcm - Avoid double free in ahash_finup()
crypto: cavium - fix Kconfig dependencies
crypto: cavium - cpt_bind_vq_to_grp could return an error code
crypto: doc - fix typo
hwrng: omap - update Kconfig help description
crypto: ccm - drop unnecessary minimum 32-bit alignment
crypto: ccm - honour alignmask of subordinate MAC cipher
crypto: caam - fix state buffer DMA (un)mapping
crypto: caam - abstract ahash request double buffering
crypto: caam - fix error path for ctx_dma mapping failure
crypto: caam - fix DMA API leaks for multiple setkey() calls
crypto: caam - don't dma_map key for hash algorithms
crypto: caam - use dma_map_sg() return code
crypto: caam - replace sg_count() with sg_nents_for_len()
crypto: caam - check sg_count() return value
crypto: caam - fix HW S/G in ablkcipher_giv_edesc_alloc()
...
Diffstat (limited to 'include')
-rw-r--r--  include/crypto/algapi.h             20
-rw-r--r--  include/crypto/chacha20.h            6
-rw-r--r--  include/crypto/hash.h               18
-rw-r--r--  include/crypto/internal/skcipher.h   2
-rw-r--r--  include/crypto/skcipher.h           34
-rw-r--r--  include/linux/compiler-gcc.h         1
-rw-r--r--  include/linux/miscdevice.h           1

7 files changed, 70 insertions(+), 12 deletions(-)
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index 404e9558e879..ebe4ded0c55d 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -191,9 +191,25 @@ static inline unsigned int crypto_queue_len(struct crypto_queue *queue)
 	return queue->qlen;
 }
 
-/* These functions require the input/output to be aligned as u32. */
 void crypto_inc(u8 *a, unsigned int size);
-void crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+void __crypto_xor(u8 *dst, const u8 *src, unsigned int size);
+
+static inline void crypto_xor(u8 *dst, const u8 *src, unsigned int size)
+{
+	if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) &&
+	    __builtin_constant_p(size) &&
+	    (size % sizeof(unsigned long)) == 0) {
+		unsigned long *d = (unsigned long *)dst;
+		unsigned long *s = (unsigned long *)src;
+
+		while (size > 0) {
+			*d++ ^= *s++;
+			size -= sizeof(unsigned long);
+		}
+	} else {
+		__crypto_xor(dst, src, size);
+	}
+}
 
 int blkcipher_walk_done(struct blkcipher_desc *desc,
 			struct blkcipher_walk *walk, int err);
diff --git a/include/crypto/chacha20.h b/include/crypto/chacha20.h
index 20d20f681a72..445fc45f4b5b 100644
--- a/include/crypto/chacha20.h
+++ b/include/crypto/chacha20.h
@@ -5,6 +5,7 @@
 #ifndef _CRYPTO_CHACHA20_H
 #define _CRYPTO_CHACHA20_H
 
+#include <crypto/skcipher.h>
 #include <linux/types.h>
 #include <linux/crypto.h>
 
@@ -18,9 +19,8 @@ struct chacha20_ctx {
 
 void chacha20_block(u32 *state, void *stream);
 void crypto_chacha20_init(u32 *state, struct chacha20_ctx *ctx, u8 *iv);
-int crypto_chacha20_setkey(struct crypto_tfm *tfm, const u8 *key,
+int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
 			   unsigned int keysize);
-int crypto_chacha20_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
-			  struct scatterlist *src, unsigned int nbytes);
+int crypto_chacha20_crypt(struct skcipher_request *req);
 
 #endif
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index 216a2b876147..b5727bcd2336 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -329,6 +329,16 @@ static inline unsigned int crypto_ahash_digestsize(struct crypto_ahash *tfm)
 	return crypto_hash_alg_common(tfm)->digestsize;
 }
 
+/**
+ * crypto_ahash_statesize() - obtain size of the ahash state
+ * @tfm: cipher handle
+ *
+ * Return the size of the ahash state. With the crypto_ahash_export()
+ * function, the caller can export the state into a buffer whose size is
+ * defined with this function.
+ *
+ * Return: size of the ahash state
+ */
 static inline unsigned int crypto_ahash_statesize(struct crypto_ahash *tfm)
 {
 	return crypto_hash_alg_common(tfm)->statesize;
@@ -369,11 +379,7 @@ static inline struct crypto_ahash *crypto_ahash_reqtfm(
  * crypto_ahash_reqsize() - obtain size of the request data structure
  * @tfm: cipher handle
  *
- * Return the size of the ahash state size. With the crypto_ahash_export
- * function, the caller can export the state into a buffer whose size is
- * defined with this function.
- *
- * Return: size of the ahash state
+ * Return: size of the request data
  */
 static inline unsigned int crypto_ahash_reqsize(struct crypto_ahash *tfm)
 {
@@ -453,7 +459,7 @@ int crypto_ahash_digest(struct ahash_request *req);
  *
  * This function exports the hash state of the ahash_request handle into the
  * caller-allocated output buffer out which must have sufficient size (e.g. by
- * calling crypto_ahash_reqsize).
+ * calling crypto_ahash_statesize()).
  *
  * Return: 0 if the export was successful; < 0 if an error occurred
  */
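The hash.h hunks above correct the kernel-doc: an export buffer must be sized with crypto_ahash_statesize(), not crypto_ahash_reqsize(). A hedged sketch of a caller that checkpoints a partial hash — the helper name is hypothetical, only the crypto API calls are from the kernel:

#include <crypto/hash.h>
#include <linux/slab.h>

/* Hypothetical helper: snapshot an in-progress hash for later import. */
static int example_save_hash_state(struct ahash_request *req, void **out)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);

	/* Per the corrected kernel-doc, size the buffer with statesize. */
	*out = kmalloc(crypto_ahash_statesize(tfm), GFP_KERNEL);
	if (!*out)
		return -ENOMEM;

	return crypto_ahash_export(req, *out);
}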
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 8735979ed341..e42f7063f245 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -66,7 +66,7 @@ struct skcipher_walk {
 	int flags;
 
 	unsigned int blocksize;
-	unsigned int chunksize;
+	unsigned int stride;
 	unsigned int alignmask;
 };
 
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 750b14f1ada4..562001cb412b 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -115,6 +115,9 @@ struct crypto_skcipher {
  *	    IV of exactly that size to perform the encrypt or decrypt operation.
  * @chunksize: Equal to the block size except for stream ciphers such as
  *	       CTR where it is set to the underlying block size.
+ * @walksize: Equal to the chunk size except in cases where the algorithm is
+ *	      considerably more efficient if it can operate on multiple chunks
+ *	      in parallel. Should be a multiple of chunksize.
  * @base: Definition of a generic crypto algorithm.
  *
  * All fields except @ivsize are mandatory and must be filled.
@@ -131,6 +134,7 @@ struct skcipher_alg {
 	unsigned int max_keysize;
 	unsigned int ivsize;
 	unsigned int chunksize;
+	unsigned int walksize;
 
 	struct crypto_alg base;
 };
@@ -289,6 +293,19 @@ static inline unsigned int crypto_skcipher_alg_chunksize(
 	return alg->chunksize;
 }
 
+static inline unsigned int crypto_skcipher_alg_walksize(
+	struct skcipher_alg *alg)
+{
+	if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
+	    CRYPTO_ALG_TYPE_BLKCIPHER)
+		return alg->base.cra_blocksize;
+
+	if (alg->base.cra_ablkcipher.encrypt)
+		return alg->base.cra_blocksize;
+
+	return alg->walksize;
+}
+
 /**
  * crypto_skcipher_chunksize() - obtain chunk size
  * @tfm: cipher handle
@@ -307,6 +324,23 @@ static inline unsigned int crypto_skcipher_chunksize(
 }
 
 /**
+ * crypto_skcipher_walksize() - obtain walk size
+ * @tfm: cipher handle
+ *
+ * In some cases, algorithms can only perform optimally when operating on
+ * multiple blocks in parallel. This is reflected by the walksize, which
+ * must be a multiple of the chunksize (or equal if the concern does not
+ * apply)
+ *
+ * Return: walk size in bytes
+ */
+static inline unsigned int crypto_skcipher_walksize(
+	struct crypto_skcipher *tfm)
+{
+	return crypto_skcipher_alg_walksize(crypto_skcipher_alg(tfm));
+}
+
+/**
  * crypto_skcipher_blocksize() - obtain block size of cipher
  * @tfm: cipher handle
  *
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 0444b1336268..fddd1a5eb322 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -116,6 +116,7 @@
  */
 #define __pure			__attribute__((pure))
 #define __aligned(x)		__attribute__((aligned(x)))
+#define __aligned_largest	__attribute__((aligned))
 #define __printf(a, b)		__attribute__((format(printf, a, b)))
 #define __scanf(a, b)		__attribute__((format(scanf, a, b)))
 #define __attribute_const__	__attribute__((__const__))
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 0590263c462c..762b5fec3383 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -32,6 +32,7 @@
 #define SGI_MMTIMER		153
 #define STORE_QUEUE_MINOR	155	/* unused */
 #define I2O_MINOR		166
+#define HWRNG_MINOR		183
 #define MICROCODE_MINOR		184
 #define IRNET_MINOR		187
 #define VFIO_MINOR		196
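To illustrate the new walksize attribute added to skcipher.h above: a hypothetical NEON ChaCha20 driver that runs fastest on four 64-byte blocks at a time could advertise a walk size of four chunks, so the skcipher walk presents 256-byte strides where the input allows. All driver names below are illustrative; only the fields and constants come from the headers in this diff:

#include <crypto/chacha20.h>
#include <crypto/internal/skcipher.h>
#include <linux/module.h>

/* Illustrative registration fragment; the real handlers are omitted. */
static struct skcipher_alg example_chacha20_alg = {
	.base.cra_name		= "chacha20",
	.base.cra_driver_name	= "chacha20-neon-example",	/* hypothetical */
	.base.cra_priority	= 300,
	.base.cra_blocksize	= 1,	/* stream cipher */
	.base.cra_ctxsize	= sizeof(struct chacha20_ctx),
	.base.cra_module	= THIS_MODULE,

	.min_keysize		= CHACHA20_KEY_SIZE,
	.max_keysize		= CHACHA20_KEY_SIZE,
	.ivsize			= CHACHA20_IV_SIZE,
	.chunksize		= CHACHA20_BLOCK_SIZE,		/* 64 bytes */
	.walksize		= 4 * CHACHA20_BLOCK_SIZE,	/* multiple of chunksize */
	.setkey			= crypto_chacha20_setkey,
	/* .encrypt and .decrypt would point at the NEON-backed routines. */
};

Algorithms that have no use for batching simply leave walksize at zero, in which case it defaults to the chunksize.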