s390/crypto: fix possible sleep during spinlock acquired
This patch fixes a complaint about a possible sleep while a spinlock is held,
"BUG: sleeping function called from invalid context at
include/crypto/algapi.h:426"
for the ctr(aes) and ctr(des) s390-specific ciphers.

Instead of a spinlock this patch introduces a mutex, which is safe
to hold in sleeping context. Please note that a deadlock is not
possible, as mutex_trylock() is used.

Signed-off-by: Harald Freudenberger <freude@linux.ibm.com>
Reported-by: Julian Wiedmann <jwi@linux.ibm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
hfreude authored and heicarst committed May 29, 2019
1 parent bef9f0b commit 1c2c702
Showing 2 changed files with 8 additions and 7 deletions.
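
For context, here is a minimal userspace sketch of the trylock-with-fallback pattern the two ctr implementations rely on. It is not the kernel code: pthread_mutex_trylock() stands in for mutex_trylock(), and the names shared_ctrblk, crypt_blocks and ctr_crypt are illustrative only. It shows why mutex_trylock() cannot deadlock here: a caller that fails to get the lock does not wait, it simply takes the slower per-block path with a private counter buffer.

/*
 * Simplified illustration (userspace, pthreads) of the pattern used by
 * the ctr code: try to take the lock that protects the shared counter
 * buffer; on failure, fall back to a private buffer and process one
 * block at a time, so no caller ever blocks or deadlocks.
 */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>

#define BLOCK_SIZE 16

/* illustrative stand-ins, not kernel symbols */
static unsigned char shared_ctrblk[8 * BLOCK_SIZE];   /* shared, lock-protected */
static pthread_mutex_t ctrblk_lock = PTHREAD_MUTEX_INITIALIZER;

static void crypt_blocks(const unsigned char *ctr, size_t n)
{
        /* stand-in for the real cipher call (cpacf_kmctr() in the kernel) */
        printf("processed %zu block(s)\n", n / BLOCK_SIZE);
        (void)ctr;
}

static void ctr_crypt(size_t nbytes)
{
        unsigned char local_ctr[BLOCK_SIZE] = { 0 };
        int locked = (pthread_mutex_trylock(&ctrblk_lock) == 0);

        while (nbytes >= BLOCK_SIZE) {
                if (locked && nbytes >= 2 * BLOCK_SIZE) {
                        /* lock held: batch counter blocks via the shared buffer */
                        size_t n = nbytes < sizeof(shared_ctrblk) ?
                                   nbytes : sizeof(shared_ctrblk);
                        n -= n % BLOCK_SIZE;
                        crypt_blocks(shared_ctrblk, n);
                        nbytes -= n;
                } else {
                        /* no lock: single-block fallback with a private counter */
                        crypt_blocks(local_ctr, BLOCK_SIZE);
                        nbytes -= BLOCK_SIZE;
                }
        }
        if (locked)
                pthread_mutex_unlock(&ctrblk_lock);
}

int main(void)
{
        ctr_crypt(5 * BLOCK_SIZE);   /* first caller typically gets the lock */
        return 0;
}

The design point of the patch itself is that the walk done under the lock may sleep (the quoted BUG points at include/crypto/algapi.h), which is legal while holding a mutex but not while holding a spinlock.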
8 changes: 4 additions & 4 deletions arch/s390/crypto/aes_s390.c
@@ -27,14 +27,14 @@
 #include <linux/module.h>
 #include <linux/cpufeature.h>
 #include <linux/init.h>
-#include <linux/spinlock.h>
+#include <linux/mutex.h>
 #include <linux/fips.h>
 #include <linux/string.h>
 #include <crypto/xts.h>
 #include <asm/cpacf.h>
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions,
                     kma_functions;
@@ -698,7 +698,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
         unsigned int n, nbytes;
         int ret, locked;
 
-        locked = spin_trylock(&ctrblk_lock);
+        locked = mutex_trylock(&ctrblk_lock);
 
         ret = blkcipher_walk_virt_block(desc, walk, AES_BLOCK_SIZE);
         while ((nbytes = walk->nbytes) >= AES_BLOCK_SIZE) {
@@ -716,7 +716,7 @@ static int ctr_aes_crypt(struct blkcipher_desc *desc, unsigned long modifier,
                 ret = blkcipher_walk_done(desc, walk, nbytes - n);
         }
         if (locked)
-                spin_unlock(&ctrblk_lock);
+                mutex_unlock(&ctrblk_lock);
         /*
          * final block may be < AES_BLOCK_SIZE, copy only nbytes
          */
7 changes: 4 additions & 3 deletions arch/s390/crypto/des_s390.c
@@ -14,14 +14,15 @@
 #include <linux/cpufeature.h>
 #include <linux/crypto.h>
 #include <linux/fips.h>
+#include <linux/mutex.h>
 #include <crypto/algapi.h>
 #include <crypto/des.h>
 #include <asm/cpacf.h>
 
 #define DES3_KEY_SIZE (3 * DES_KEY_SIZE)
 
 static u8 *ctrblk;
-static DEFINE_SPINLOCK(ctrblk_lock);
+static DEFINE_MUTEX(ctrblk_lock);
 
 static cpacf_mask_t km_functions, kmc_functions, kmctr_functions;
 
@@ -374,7 +375,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
         unsigned int n, nbytes;
         int ret, locked;
 
-        locked = spin_trylock(&ctrblk_lock);
+        locked = mutex_trylock(&ctrblk_lock);
 
         ret = blkcipher_walk_virt_block(desc, walk, DES_BLOCK_SIZE);
         while ((nbytes = walk->nbytes) >= DES_BLOCK_SIZE) {
@@ -391,7 +392,7 @@ static int ctr_desall_crypt(struct blkcipher_desc *desc, unsigned long fc,
                 ret = blkcipher_walk_done(desc, walk, nbytes - n);
         }
         if (locked)
-                spin_unlock(&ctrblk_lock);
+                mutex_unlock(&ctrblk_lock);
         /* final block may be < DES_BLOCK_SIZE, copy only nbytes */
         if (nbytes) {
                 cpacf_kmctr(fc, ctx->key, buf, walk->src.virt.addr,
