author     Linus Torvalds <torvalds@linux-foundation.org>   2017-11-14 10:52:09 -0800
committer  Linus Torvalds <torvalds@linux-foundation.org>   2017-11-14 10:52:09 -0800
commit     37dc79565c4b7e735f190eaa6ed5bb6eb3d3968a (patch)
tree       4f20cc3c9240c5759f72bf560b596a809173ee29 /drivers/crypto/bcm/cipher.c
parent     894025f24bd028942da3e602b87d9f7223109b14 (diff)
parent     1d9ddde12e3c9bab7f3d3484eb9446315e3571ca (diff)
download   linux-37dc79565c4b7e735f190eaa6ed5bb6eb3d3968a.tar.bz2
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
Pull crypto updates from Herbert Xu:
"Here is the crypto update for 4.15:
API:
- Disambiguate EBUSY when queueing a crypto request by adding ENOSPC.
This change touches code outside the crypto API (a caller-side sketch
follows this message).
- Reset settings when empty string is written to rng_current.
Algorithms:
- Add OSCCA SM3 secure hash.
Drivers:
- Remove old mv_cesa driver (replaced by marvell/cesa).
- Enable rfc3686/ecb/cfb/ofb AES in crypto4xx.
- Add ccm/gcm AES in crypto4xx.
- Add support for BCM7278 in iproc-rng200.
- Add hash support on Exynos in s5p-sss.
- Fix fallback-induced error in vmx.
- Fix output IV in atmel-aes.
- Fix empty GCM hash in mediatek.
Others:
- Fix DoS potential in lib/mpi.
- Fix potential out-of-order issues with padata"
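
For the EBUSY/ENOSPC disambiguation noted above, here is a minimal caller-side
sketch of the resulting convention. It is not code from this merge: the helper
name is hypothetical, and only the generic crypto wait helpers
(DECLARE_CRYPTO_WAIT, crypto_req_done, crypto_wait_req) are used.

/*
 * Hypothetical caller, illustrating the post-change return codes when
 * submitting an async crypto request.
 */
#include <crypto/skcipher.h>
#include <linux/crypto.h>

static int submit_one_request(struct skcipher_request *req)
{
	DECLARE_CRYPTO_WAIT(wait);		/* completion + error holder */

	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);

	/*
	 * With MAY_BACKLOG set, -EINPROGRESS means the request was queued
	 * and -EBUSY means it was accepted into the backlog; both are
	 * handled by crypto_wait_req(), which sleeps until completion.
	 * Without MAY_BACKLOG, a full queue now returns -ENOSPC instead
	 * of the previously ambiguous -EBUSY.
	 */
	return crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
}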
* 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6: (162 commits)
lib/mpi: call cond_resched() from mpi_powm() loop
crypto: stm32/hash - Fix return issue on update
crypto: dh - Remove pointless checks for NULL 'p' and 'g'
crypto: qat - Clean up error handling in qat_dh_set_secret()
crypto: dh - Don't permit 'key' or 'g' size longer than 'p'
crypto: dh - Don't permit 'p' to be 0
crypto: dh - Fix double free of ctx->p
hwrng: iproc-rng200 - Add support for BCM7278
dt-bindings: rng: Document BCM7278 RNG200 compatible
crypto: chcr - Replace _manual_ swap with swap macro
crypto: marvell - Add a NULL entry at the end of mv_cesa_plat_id_table[]
hwrng: virtio - Virtio RNG devices need to be re-registered after suspend/resume
crypto: atmel - remove empty functions
crypto: ecdh - remove empty exit()
MAINTAINERS: update maintainer for qat
crypto: caam - remove unused param of ctx_map_to_sec4_sg()
crypto: caam - remove unneeded edesc zeroization
crypto: atmel-aes - Reset the controller before each use
crypto: atmel-aes - properly set IV after {en,de}crypt
hwrng: core - Reset user selected rng by writing "" to rng_current
...
Diffstat (limited to 'drivers/crypto/bcm/cipher.c')
-rw-r--r--  drivers/crypto/bcm/cipher.c  116
1 file changed, 52 insertions(+), 64 deletions(-)
diff --git a/drivers/crypto/bcm/cipher.c b/drivers/crypto/bcm/cipher.c
index 8685c7e4debd..ce70b44d0fb6 100644
--- a/drivers/crypto/bcm/cipher.c
+++ b/drivers/crypto/bcm/cipher.c
@@ -256,6 +256,44 @@ spu_ablkcipher_tx_sg_create(struct brcm_message *mssg,
 	return 0;
 }
 
+static int mailbox_send_message(struct brcm_message *mssg, u32 flags,
+				u8 chan_idx)
+{
+	int err;
+	int retry_cnt = 0;
+	struct device *dev = &(iproc_priv.pdev->dev);
+
+	err = mbox_send_message(iproc_priv.mbox[chan_idx], mssg);
+	if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
+		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
+			/*
+			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
+			 * not in atomic context and we can wait and try again.
+			 */
+			retry_cnt++;
+			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
+			err = mbox_send_message(iproc_priv.mbox[chan_idx],
+						mssg);
+			atomic_inc(&iproc_priv.mb_no_spc);
+		}
+	}
+	if (err < 0) {
+		atomic_inc(&iproc_priv.mb_send_fail);
+		return err;
+	}
+
+	/* Check error returned by mailbox controller */
+	err = mssg->error;
+	if (unlikely(err < 0))
+		dev_err(dev, "message error %d", err);
+
+	/* Signal txdone for mailbox channel */
+	mbox_client_txdone(iproc_priv.mbox[chan_idx], err);
+	return err;
+}
+
 /**
  * handle_ablkcipher_req() - Submit as much of a block cipher request as fits in
  * a single SPU request message, starting at the current position in the request
@@ -293,7 +331,6 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
 	u32 pad_len;		/* total length of all padding */
 	bool update_key = false;
 	struct brcm_message *mssg;	/* mailbox message */
-	int retry_cnt = 0;
 
 	/* number of entries in src and dst sg in mailbox message. */
 	u8 rx_frag_num = 2;	/* response header and STATUS */
@@ -462,24 +499,9 @@ static int handle_ablkcipher_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (unlikely(err < 0)) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
 
 	return -EINPROGRESS;
 }
@@ -710,7 +732,6 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	u32 spu_hdr_len;
 	unsigned int digestsize;
 	u16 rem = 0;
-	int retry_cnt = 0;
 
 	/*
 	 * number of entries in src and dst sg. Always includes SPU msg header.
@@ -904,24 +925,10 @@ static int handle_ahash_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (err < 0) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
+
 	return -EINPROGRESS;
 }
@@ -1320,7 +1327,6 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 	int assoc_nents = 0;
 	bool incl_icv = false;
 	unsigned int digestsize = ctx->digestsize;
-	int retry_cnt = 0;
 
 	/* number of entries in src and dst sg. Always includes SPU msg header.
 	 */
@@ -1367,11 +1373,11 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 		 * expects AAD to include just SPI and seqno. So
 		 * subtract off the IV len.
 		 */
-		aead_parms.assoc_size -= GCM_ESP_IV_SIZE;
+		aead_parms.assoc_size -= GCM_RFC4106_IV_SIZE;
 
 		if (rctx->is_encrypt) {
 			aead_parms.return_iv = true;
-			aead_parms.ret_iv_len = GCM_ESP_IV_SIZE;
+			aead_parms.ret_iv_len = GCM_RFC4106_IV_SIZE;
 			aead_parms.ret_iv_off = GCM_ESP_SALT_SIZE;
 		}
 	} else {
@@ -1558,24 +1564,9 @@ static int handle_aead_req(struct iproc_reqctx_s *rctx)
 	if (err)
 		return err;
 
-	err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx], mssg);
-	if (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) {
-		while ((err == -ENOBUFS) && (retry_cnt < SPU_MB_RETRY_MAX)) {
-			/*
-			 * Mailbox queue is full. Since MAY_SLEEP is set, assume
-			 * not in atomic context and we can wait and try again.
-			 */
-			retry_cnt++;
-			usleep_range(MBOX_SLEEP_MIN, MBOX_SLEEP_MAX);
-			err = mbox_send_message(iproc_priv.mbox[rctx->chan_idx],
-						mssg);
-			atomic_inc(&iproc_priv.mb_no_spc);
-		}
-	}
-	if (err < 0) {
-		atomic_inc(&iproc_priv.mb_send_fail);
+	err = mailbox_send_message(mssg, req->base.flags, rctx->chan_idx);
+	if (unlikely(err < 0))
 		return err;
-	}
 
 	return -EINPROGRESS;
 }
@@ -3255,7 +3246,7 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
 		 },
 		 .setkey = aead_gcm_esp_setkey,
-		 .ivsize = GCM_ESP_IV_SIZE,
+		 .ivsize = GCM_RFC4106_IV_SIZE,
 		 .maxauthsize = AES_BLOCK_SIZE,
 	 },
 	 .cipher_info = {
@@ -3301,7 +3292,7 @@ static struct iproc_alg_s driver_algs[] = {
 			.cra_flags = CRYPTO_ALG_NEED_FALLBACK
 		 },
 		 .setkey = rfc4543_gcm_esp_setkey,
-		 .ivsize = GCM_ESP_IV_SIZE,
+		 .ivsize = GCM_RFC4106_IV_SIZE,
 		 .maxauthsize = AES_BLOCK_SIZE,
 	 },
 	 .cipher_info = {
@@ -4537,7 +4528,7 @@ static int spu_mb_init(struct device *dev)
 	mcl->dev = dev;
 	mcl->tx_block = false;
 	mcl->tx_tout = 0;
-	mcl->knows_txdone = false;
+	mcl->knows_txdone = true;
 	mcl->rx_callback = spu_rx_callback;
 	mcl->tx_done = NULL;
 
@@ -4818,7 +4809,6 @@ static int spu_dt_read(struct platform_device *pdev)
 	struct device *dev = &pdev->dev;
 	struct spu_hw *spu = &iproc_priv.spu;
 	struct resource *spu_ctrl_regs;
-	const struct of_device_id *match;
 	const struct spu_type_subtype *matched_spu_type;
 	struct device_node *dn = pdev->dev.of_node;
 	int err, i;
@@ -4826,14 +4816,12 @@ static int spu_dt_read(struct platform_device *pdev)
 	/* Count number of mailbox channels */
 	spu->num_chan = of_count_phandle_with_args(dn, "mboxes", "#mbox-cells");
 
-	match = of_match_device(of_match_ptr(bcm_spu_dt_ids), dev);
-	if (!match) {
+	matched_spu_type = of_device_get_match_data(dev);
+	if (!matched_spu_type) {
 		dev_err(&pdev->dev, "Failed to match device\n");
 		return -ENODEV;
 	}
 
-	matched_spu_type = match->data;
-
 	spu->spu_type = matched_spu_type->type;
 	spu->spu_subtype = matched_spu_type->subtype;
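
The knows_txdone change in spu_mb_init() above moves TX completion signalling
from the mailbox controller to the client, which is why the new
mailbox_send_message() helper ends with mbox_client_txdone(). A minimal sketch
of that generic mailbox-framework pattern follows; the client and helper names
are hypothetical and not part of cipher.c, and only mailbox_client.h calls are
used.

/*
 * Client-driven TX-acknowledge pattern: with knows_txdone set, the
 * client itself reports when a transmitted message is done.
 */
#include <linux/mailbox_client.h>

static void init_example_client(struct mbox_client *cl, struct device *dev)
{
	cl->dev = dev;
	cl->tx_block = false;
	/* The client, not the controller, will signal TX completion */
	cl->knows_txdone = true;
}

static int send_and_ack(struct mbox_chan *chan, void *msg)
{
	int err = mbox_send_message(chan, msg);

	if (err < 0)
		return err;	/* message could not be queued at all */

	/* ...inspect the controller's reply / message status here... */

	/* Report completion so the framework can submit the next message */
	mbox_client_txdone(chan, 0);
	return 0;
}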