| author | Harald Freudenberger <freude@linux.ibm.com> | 2019-05-27 15:24:20 +0200 |
|---|---|---|
| committer | Heiko Carstens <heiko.carstens@de.ibm.com> | 2019-05-29 21:13:10 +0200 |
| commit | 1c2c7029c008922d4d48902cc386250502e73d51 | |
| tree | 51e5dafe573a32bfd593cfb4dd7f3608fe73ee56 /arch/s390/crypto/aes_s390.c | |
| parent | bef9f0ba300a55d79a69aa172156072182176515 | |
| download | linux-1c2c7029c008922d4d48902cc386250502e73d51.tar.bz2 | |
s390/crypto: fix possible sleep while holding a spinlock
This patch fixes a complaint about a possible sleep while a spinlock is held,

"BUG: sleeping function called from invalid context at
include/crypto/algapi.h:426"

for the ctr(aes) and ctr(des) s390-specific ciphers.

Instead of a spinlock this patch introduces a mutex, which is safe to
hold in a sleeping context. Note that a deadlock is not possible, as
mutex_trylock() is used; a minimal sketch of this pattern follows the
sign-off trailers below.
Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Reported-by: Julian Wiedmann <jwi@linux.ibm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
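
To make the "no deadlock" argument concrete, here is a minimal user-space sketch of the same pattern, assuming a pthread mutex in place of the kernel mutex; the helper names (ctr_crypt_sketch, shared_ctrblk, process_blocks) are invented for illustration and do not appear in the patch. The lock is only ever taken with a trylock, so a caller that cannot get the shared counter buffer immediately falls back to a small local buffer rather than waiting, and because a mutex, unlike a spinlock, may be held across code that can sleep, the "sleeping function called from invalid context" warning no longer triggers.

```c
/*
 * Hypothetical user-space analogue of the trylock-with-fallback pattern
 * used by the patch. The names (ctr_crypt_sketch, shared_ctrblk,
 * process_blocks) are illustrative only; the real code lives in
 * arch/s390/crypto/aes_s390.c and uses the kernel's mutex_trylock().
 */
#include <pthread.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 16

/* large shared counter buffer, analogous to the kernel's ctrblk */
static uint8_t shared_ctrblk[64 * BLOCK_SIZE];
static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the real CPACF-based CTR processing */
static void process_blocks(uint8_t *ctrbuf, size_t bufsize,
                           const uint8_t *in, uint8_t *out, size_t len)
{
	(void)ctrbuf;
	(void)bufsize;
	memcpy(out, in, len);	/* placeholder, not real encryption */
}

static void ctr_crypt_sketch(const uint8_t *in, uint8_t *out, size_t len)
{
	uint8_t local_ctrblk[BLOCK_SIZE];
	int locked;

	/*
	 * Trylock never blocks: if the shared buffer is busy, simply fall
	 * back to a small per-call buffer instead of waiting, so a caller
	 * can never deadlock against another holder of the lock.
	 */
	locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);

	if (locked)
		/* got the big shared buffer: many blocks per operation */
		process_blocks(shared_ctrblk, sizeof(shared_ctrblk),
		               in, out, len);
	else
		/* contended: work from the small local buffer instead */
		process_blocks(local_ctrblk, sizeof(local_ctrblk),
		               in, out, len);

	/*
	 * The lock may have been held across work that can sleep; that is
	 * legal for a mutex (or a pthread mutex here), but it was a bug
	 * for the spinlock the code used before.
	 */
	if (locked)
		pthread_mutex_unlock(&ctrblk_lock);
}

int main(void)
{
	uint8_t in[4 * BLOCK_SIZE] = { 0 }, out[4 * BLOCK_SIZE];

	ctr_crypt_sketch(in, out, sizeof(in));
	return 0;
}
```

In the kernel itself, mutex_trylock() returns 1 when the lock was acquired and 0 otherwise, so the same locked flag decides whether mutex_unlock() runs at the end, exactly as in the diff below.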
Diffstat (limited to 'arch/s390/crypto/aes_s390.c')
-rw-r--r-- | arch/s390/crypto/aes_s390.c | 8 |
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/s390/crypto/aes_s390.c b/arch/s390/crypto/aes_s390.c
index 178c53036f7f..d00f84add5f4 100644
--- a/arch/s390/crypto/aes_s390.c
+++ b/arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/fips.h>
 #include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
 		    kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 	unsigned int n, nbytes;
 	int ret, locked;
 
-	locked = spin_trylock(&ctrblk_lock);
+	locked = mutex_trylock(&ctrblk_lock);
 
 	ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
 	while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
 		ret = blkcipher_walk_done(desc, walk, nbytes - n);
 	}
 	if (locked)
-		spin_unlock(&ctrblk_lock);
+		mutex_unlock(&ctrblk_lock);
 	/*
 	 * final block may be < AES_BLOCK_SIZE, copy only nbytes
 	 */